a73x

4f15ba85

Add partial instance buffer uploads

a73x   2026-04-08 19:29


diff --git a/src/renderer.zig b/src/renderer.zig
index 9b17ae6..64f5ece 100644
--- a/src/renderer.zig
+++ b/src/renderer.zig
@@ -330,6 +330,11 @@ const InstanceUploadDecision = struct {
    upload_mode: InstanceUploadMode,
};

/// Byte-addressed window into the instance buffer, produced by
/// `planInstanceRangeWrite` from instance-index units. Both fields are in
/// bytes (scaled by `@sizeOf(Instance)`), ready to hand to `vkMapMemory`.
const InstanceRangeWrite = struct {
    /// Offset from the start of the instance buffer, in bytes.
    byte_offset: vk.DeviceSize,
    /// Length of the mapped window, in bytes.
    byte_len: vk.DeviceSize,
};

fn planInstanceUpload(req: InstanceUploadRequest) InstanceUploadDecision {
    const needed_capacity = std.math.add(u32, req.offset_instances, req.write_len) catch {
        return .{
@@ -343,6 +348,32 @@ fn planInstanceUpload(req: InstanceUploadRequest) InstanceUploadDecision {
    };
}

/// Translate an instance-indexed window into the byte window handed to the
/// memory-mapping call: both offset and length scale by `@sizeOf(Instance)`.
/// Widening to `vk.DeviceSize` happens before the multiply, so the products
/// cannot overflow for any pair of u32 inputs.
fn planInstanceRangeWrite(offset_instances: u32, len_instances: u32) InstanceRangeWrite {
    const stride: vk.DeviceSize = @sizeOf(Instance);
    return .{
        .byte_offset = stride * offset_instances,
        .byte_len = stride * len_instances,
    };
}

/// Copy `instances` into `target` starting at index `offset_instances`.
/// Fails with `error.InvalidInstanceRange` whenever the window does not fit
/// entirely inside `target`; a "full upload" decision is also an error here
/// because a CPU-side slice cannot be grown by this function.
fn writeInstanceRange(
    target: []Instance,
    offset_instances: u32,
    instances: []const Instance,
) !void {
    const capacity = std.math.cast(u32, target.len) orelse
        return error.InvalidInstanceRange;
    const write_len = std.math.cast(u32, instances.len) orelse
        return error.InvalidInstanceRange;

    const decision = planInstanceUpload(.{
        .current_capacity = capacity,
        .offset_instances = offset_instances,
        .write_len = write_len,
    });
    // Only an in-bounds partial write is acceptable for a fixed-size slice.
    if (decision.upload_mode != .partial) return error.InvalidInstanceRange;

    const start: usize = offset_instances;
    @memcpy(target[start..][0..instances.len], instances);
}

/// Whether a presentation result code asks for the swapchain to be rebuilt.
/// Only `suboptimal_khr` is checked here; other rebuild triggers presumably
/// surface through the error path — NOTE(review): confirm against the
/// acquire/present call sites.
fn swapchainNeedsRebuild(result: vk.Result) bool {
    return switch (result) {
        .suboptimal_khr => true,
        else => false,
    };
}
@@ -1223,6 +1254,39 @@ pub const Context = struct {
        self.vkd.unmapMemory(self.device, self.instance_memory);
    }

    /// Upload a contiguous instance subrange when the existing buffer is large
    /// enough to hold it without reallocation.
    ///
    /// Returns `true` when the range needs more capacity than the buffer has,
    /// meaning the caller must fall back to a full upload instead. Returns
    /// `false` on success (including the trivial empty-range case).
    /// Fails with `error.InvalidInstanceRange` when the range itself is
    /// malformed (length overflow or offset/length arithmetic overflow).
    pub fn uploadInstanceRange(
        self: *Context,
        offset_instances: u32,
        instances: []const Instance,
    ) !bool {
        const write_len = std.math.cast(u32, instances.len) orelse
            return error.InvalidInstanceRange;

        const decision = planInstanceUpload(.{
            .current_capacity = self.instance_capacity,
            .offset_instances = offset_instances,
            .write_len = write_len,
        });
        switch (decision.upload_mode) {
            .invalid_range => return error.InvalidInstanceRange,
            // Buffer too small: signal the caller to grow and re-upload.
            .full => return true,
            .partial => {},
        }

        // Nothing to copy; skip the map/unmap round trip entirely.
        if (instances.len == 0) return false;

        const range = planInstanceRangeWrite(offset_instances, write_len);
        const mapped = try self.vkd.mapMemory(
            self.device,
            self.instance_memory,
            range.byte_offset,
            range.byte_len,
            .{},
        );
        defer self.vkd.unmapMemory(self.device, self.instance_memory);

        const dst: [*]Instance = @ptrCast(@alignCast(mapped));
        @memcpy(dst[0..instances.len], instances);
        return false;
    }

    /// Full draw pass: bind pipeline, push constants, vertex + instance buffers, draw, present.
    pub fn drawCells(
        self: *Context,
@@ -1430,3 +1494,56 @@ test "range upload reports overflow explicitly" {
    try std.testing.expectEqual(@as(?u32, null), decision.needed_capacity);
    try std.testing.expectEqual(InstanceUploadMode.invalid_range, decision.upload_mode);
}

/// Deterministic test fixture: every scalar field is `seed` plus a distinct
/// offset, so a mismatched field is immediately visible in a failing diff.
fn testInstance(seed: f32) Instance {
    const b = seed;
    return .{
        .cell_pos = .{ b, b + 1 },
        .glyph_size = .{ b + 2, b + 3 },
        .glyph_bearing = .{ b + 4, b + 5 },
        .uv_rect = .{ b + 6, b + 7, b + 8, b + 9 },
        .fg = .{ b + 10, b + 11, b + 12, b + 13 },
        .bg = .{ b + 14, b + 15, b + 16, b + 17 },
    };
}

// Fix: the test name referenced "uploadInstanceRangeWrite", a function that
// does not exist; it exercises `planInstanceRangeWrite`.
test "planInstanceRangeWrite computes byte offset from instance offset" {
    const write = planInstanceRangeWrite(3, 2);
    try std.testing.expectEqual(@as(vk.DeviceSize, 3 * @sizeOf(Instance)), write.byte_offset);
    try std.testing.expectEqual(@as(vk.DeviceSize, 2 * @sizeOf(Instance)), write.byte_len);
}

test "writeInstanceRange overwrites only the requested window" {
    var target = [_]Instance{
        testInstance(0),
        testInstance(20),
        testInstance(40),
        testInstance(60),
    };
    const replacement = [_]Instance{ testInstance(100), testInstance(120) };

    try writeInstanceRange(&target, 1, &replacement);

    // Indices 1..3 take the replacement seeds; neighbours keep their own.
    const expected_seeds = [_]f32{ 0, 100, 120, 60 };
    for (expected_seeds, target) |seed, actual| {
        try std.testing.expectEqualDeep(testInstance(seed), actual);
    }
}

test "writeInstanceRange rejects writes past the backing slice" {
    var target = [_]Instance{ testInstance(0), testInstance(20) };
    const replacement = [_]Instance{ testInstance(100), testInstance(120) };

    // Offset 1 plus two instances would need index 2 — one past the end.
    const result = writeInstanceRange(&target, 1, &replacement);
    try std.testing.expectError(error.InvalidInstanceRange, result);
}