1const std = @import("std");
2const mem = std.mem;
3const assert = std.debug.assert;
4const Allocator = std.mem.Allocator;
5const fs = std.fs;
6
7const C = @This();
8const Module = @import("../Module.zig");
9const Compilation = @import("../Compilation.zig");
10const codegen = @import("../codegen/c.zig");
11const link = @import("../link.zig");
12const trace = @import("../tracy.zig").trace;
13const Type = @import("../type.zig").Type;
14const Air = @import("../Air.zig");
15const Liveness = @import("../Liveness.zig");
16
17pub const base_tag: link.File.Tag = .c;
18pub const zig_h = @embedFile("C/zig.h");
19
/// Common linker-backend state; see `link.File`. Holds the output file
/// handle, options, and the general-purpose allocator used by this backend.
base: link.File,
/// This linker backend does not try to incrementally link output C source code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function, stitching pre-rendered pieces of C code together.
decl_table: std.AutoArrayHashMapUnmanaged(*const Module.Decl, DeclBlock) = .{},
/// Stores Type/Value data for `typedefs` to reference.
/// Accumulates allocations and then there is a periodic garbage collection after flush().
arena: std.heap.ArenaAllocator,
28
/// Per-declaration data: the rendered C output for one Decl, kept until
/// `flushModule` stitches all blocks together.
const DeclBlock = struct {
    code: std.ArrayListUnmanaged(u8) = .{},
    fwd_decl: std.ArrayListUnmanaged(u8) = .{},
    /// Each Decl stores a mapping of Zig Types to corresponding C types, for every
    /// Zig Type used by the Decl. In flush(), we iterate over each Decl
    /// and emit the typedef code for all types, making sure to not emit the same thing twice.
    /// Any arena memory the Type points to lives in the `arena` field of `C`.
    typedefs: codegen.TypedefMap.Unmanaged = .{},

    /// Releases everything this block owns and poisons it against reuse.
    fn deinit(db: *DeclBlock, gpa: Allocator) void {
        // Every typedef's `rendered` string is its own gpa allocation; free
        // each one before tearing down the map that held them.
        for (db.typedefs.values()) |entry| {
            gpa.free(entry.rendered);
        }
        db.typedefs.deinit(gpa);
        db.fwd_decl.deinit(gpa);
        db.code.deinit(gpa);
        db.* = undefined;
    }
};
49
/// Creates the output file at `sub_path` and heap-allocates the `C` linker
/// backend instance for it. The file is deliberately not truncated here;
/// truncation happens in `flush`. Caller owns the returned pointer.
pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C {
    assert(options.object_format == .c);

    // The C backend is mutually exclusive with the LLVM/LLD pipelines.
    if (options.use_llvm) return error.LLVMHasNoCBackend;
    if (options.use_lld) return error.LLDHasNoCBackend;

    const file = try options.emit.?.directory.handle.createFile(sub_path, .{
        // Truncation is done on `flush`.
        .truncate = false,
        .mode = link.determineMode(options),
    });
    errdefer file.close();

    const self = try gpa.create(C);
    errdefer gpa.destroy(self);

    self.* = C{
        .arena = std.heap.ArenaAllocator.init(gpa),
        .base = .{
            .tag = .c,
            .options = options,
            .file = file,
            .allocator = gpa,
        },
    };
    return self;
}
78
/// Frees all state owned by this backend: every per-Decl block, the table
/// that holds them, and finally the typedef arena.
pub fn deinit(self: *C) void {
    const gpa = self.base.allocator;

    // Tear down each DeclBlock before destroying the table that owns them.
    for (self.decl_table.values()) |*decl_block| decl_block.deinit(gpa);
    self.decl_table.deinit(gpa);

    self.arena.deinit();
}
89
/// Removes `decl` from the table and frees its rendered output.
/// No-op if the Decl was never registered.
pub fn freeDecl(self: *C, decl: *Module.Decl) void {
    const gpa = self.base.allocator;
    var removed = self.decl_table.fetchSwapRemove(decl) orelse return;
    removed.value.deinit(gpa);
}
96
/// Renders the body of `func` to C source and stores the result in the owner
/// Decl's `DeclBlock` (`fwd_decl`, `typedefs`, `code`), replacing any output
/// from a previous update. The pieces are stitched together later, in
/// `flushModule`. On `error.AnalysisFail`, records the message in
/// `module.failed_decls` and returns normally so other Decls can proceed.
pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const decl = func.owner_decl;
    const gop = try self.decl_table.getOrPut(self.base.allocator, decl);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{};
    }
    const fwd_decl = &gop.value_ptr.fwd_decl;
    const typedefs = &gop.value_ptr.typedefs;
    const code = &gop.value_ptr.code;
    // Discard previously rendered output, retaining capacity for reuse.
    fwd_decl.shrinkRetainingCapacity(0);
    {
        // Each typedef's `rendered` string is a separate gpa allocation.
        for (typedefs.values()) |value| {
            module.gpa.free(value.rendered);
        }
    }
    typedefs.clearRetainingCapacity();
    code.shrinkRetainingCapacity(0);

    // Temporarily hand the DeclBlock's buffers to codegen in their managed
    // forms; on success they are moved back out below.
    var function: codegen.Function = .{
        .value_map = codegen.CValueMap.init(module.gpa),
        .air = air,
        .liveness = liveness,
        .func = func,
        .object = .{
            .dg = .{
                .gpa = module.gpa,
                .module = module,
                .error_msg = null,
                .decl = decl,
                .fwd_decl = fwd_decl.toManaged(module.gpa),
                .typedefs = typedefs.promote(module.gpa),
                .typedefs_arena = self.arena.allocator(),
            },
            .code = code.toManaged(module.gpa),
            .indent_writer = undefined, // set later so we can get a pointer to object.code
        },
    };

    function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() };
    // Frees whatever `function` still owns. On the success path the rendered
    // buffers have already been moved back into the DeclBlock, so those
    // deinits see empty containers; the value map and block table are freed
    // in every case.
    defer {
        function.value_map.deinit();
        function.blocks.deinit(module.gpa);
        function.object.code.deinit();
        function.object.dg.fwd_decl.deinit();
        for (function.object.dg.typedefs.values()) |value| {
            module.gpa.free(value.rendered);
        }
        function.object.dg.typedefs.deinit();
    }

    codegen.genFunc(&function) catch |err| switch (err) {
        error.AnalysisFail => {
            // Semantic failure: record the message for this Decl and report
            // success so compilation continues with the remaining Decls.
            try module.failed_decls.put(module.gpa, decl, function.object.dg.error_msg.?);
            return;
        },
        else => |e| return e,
    };

    // Move ownership of the rendered buffers back into the DeclBlock,
    // leaving `function` holding empty husks for the defer above.
    fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged();
    typedefs.* = function.object.dg.typedefs.unmanaged;
    function.object.dg.typedefs.unmanaged = .{};
    code.* = function.object.code.moveToUnmanaged();

    // Free excess allocated memory for this Decl.
    fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len);
    code.shrinkAndFree(module.gpa, code.items.len);
}
167
/// Renders a non-function `decl` to C source and stores the result in its
/// `DeclBlock`, replacing any output from a previous update. Mirrors
/// `updateFunc` above, using `codegen.genDecl`. On `error.AnalysisFail`,
/// records the message in `module.failed_decls` and returns normally.
pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gop = try self.decl_table.getOrPut(self.base.allocator, decl);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{};
    }
    const fwd_decl = &gop.value_ptr.fwd_decl;
    const typedefs = &gop.value_ptr.typedefs;
    const code = &gop.value_ptr.code;
    // Discard previously rendered output, retaining capacity for reuse.
    fwd_decl.shrinkRetainingCapacity(0);
    {
        // Each typedef's `rendered` string is a separate gpa allocation.
        for (typedefs.values()) |value| {
            module.gpa.free(value.rendered);
        }
    }
    typedefs.clearRetainingCapacity();
    code.shrinkRetainingCapacity(0);

    // Temporarily hand the DeclBlock's buffers to codegen in their managed
    // forms; on success they are moved back out below.
    var object: codegen.Object = .{
        .dg = .{
            .gpa = module.gpa,
            .module = module,
            .error_msg = null,
            .decl = decl,
            .fwd_decl = fwd_decl.toManaged(module.gpa),
            .typedefs = typedefs.promote(module.gpa),
            .typedefs_arena = self.arena.allocator(),
        },
        .code = code.toManaged(module.gpa),
        .indent_writer = undefined, // set later so we can get a pointer to object.code
    };
    object.indent_writer = .{ .underlying_writer = object.code.writer() };
    // Frees whatever `object` still owns. On the success path the rendered
    // buffers have already been moved back into the DeclBlock, so these
    // deinits see empty containers.
    defer {
        object.code.deinit();
        object.dg.fwd_decl.deinit();
        for (object.dg.typedefs.values()) |value| {
            module.gpa.free(value.rendered);
        }
        object.dg.typedefs.deinit();
    }

    codegen.genDecl(&object) catch |err| switch (err) {
        error.AnalysisFail => {
            // Semantic failure: record the message for this Decl and report
            // success so compilation continues with the remaining Decls.
            try module.failed_decls.put(module.gpa, decl, object.dg.error_msg.?);
            return;
        },
        else => |e| return e,
    };

    // Move ownership of the rendered buffers back into the DeclBlock.
    fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
    typedefs.* = object.dg.typedefs.unmanaged;
    object.dg.typedefs.unmanaged = .{};
    code.* = object.code.moveToUnmanaged();

    // Free excess allocated memory for this Decl.
    fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len);
    code.shrinkAndFree(module.gpa, code.items.len);
}
228
/// Intentional no-op: line-number-only changes cannot be applied to
/// already-rendered C output; a full re-generation of the Decl is required.
pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
    // The C backend does not have the ability to fix line numbers without re-generating
    // the entire Decl.
    _ = self;
    _ = module;
    _ = decl;
}
236
/// With -ofmt=c there is no separate link step; flushing the module's
/// rendered source is the entire job.
pub fn flush(self: *C, comp: *Compilation) !void {
    return flushModule(self, comp);
}
240
/// Writes the complete .c output: zig.h, error-value defines and typedefs,
/// then forward decls and non-function Decls in dependency order, and
/// finally all function bodies. Everything is gathered into iovecs and
/// written with a single positional gather write.
pub fn flushModule(self: *C, comp: *Compilation) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = comp.gpa;
    const module = self.base.options.module.?;

    // This code path happens exclusively with -ofmt=c. The flush logic for
    // emit-h is in `flushEmitH` below.

    var f: Flush = .{};
    defer f.deinit(gpa);

    // Covers zig.h and err_typedef_item.
    try f.all_buffers.ensureUnusedCapacity(gpa, 2);

    f.all_buffers.appendAssumeCapacity(.{
        .iov_base = zig_h,
        .iov_len = zig_h.len,
    });
    f.file_size += zig_h.len;

    const err_typedef_writer = f.err_typedef_buf.writer(gpa);
    // Reserve an iovec slot now but fill it in only after the Decl loop:
    // flushDecl appends typedef text to `err_typedef_buf`, which may
    // reallocate and move the buffer, invalidating any pointer taken here.
    const err_typedef_index = f.all_buffers.items.len;
    f.all_buffers.items.len += 1;

    render_errors: {
        if (module.global_error_set.size == 0) break :render_errors;
        // One `#define zig_error_<name> <value>` per error in the global set.
        var it = module.global_error_set.iterator();
        while (it.next()) |entry| {
            try err_typedef_writer.print("#define zig_error_{s} {d}\n", .{ entry.key_ptr.*, entry.value_ptr.* });
        }
        try err_typedef_writer.writeByte('\n');
    }

    // Typedefs, forward decls, and non-functions first.
    // Unlike other backends, the .c code we are emitting is order-dependent. Therefore
    // we must traverse the set of Decls that we are emitting according to their dependencies.
    // Our strategy is to populate a set of remaining decls, pop Decls one by one,
    // recursively chasing their dependencies.
    try f.remaining_decls.ensureUnusedCapacity(gpa, self.decl_table.count());

    const decl_keys = self.decl_table.keys();
    const decl_values = self.decl_table.values();
    for (decl_keys) |decl| {
        assert(decl.has_tv);
        f.remaining_decls.putAssumeCapacityNoClobber(decl, {});
    }

    while (f.remaining_decls.popOrNull()) |kv| {
        const decl = kv.key;
        try flushDecl(self, &f, decl);
    }

    // flushDecl is done appending, so `err_typedef_buf` is stable now;
    // fill in the slot reserved above.
    f.all_buffers.items[err_typedef_index] = .{
        .iov_base = f.err_typedef_buf.items.ptr,
        .iov_len = f.err_typedef_buf.items.len,
    };
    f.file_size += f.err_typedef_buf.items.len;

    // Now the function bodies. `fn_count` was tallied by flushDecl, so this
    // reservation is exact and appendAssumeCapacity below is safe.
    try f.all_buffers.ensureUnusedCapacity(gpa, f.fn_count);
    for (decl_keys) |decl, i| {
        if (decl.getFunction() != null) {
            const decl_block = &decl_values[i];
            const buf = decl_block.code.items;
            if (buf.len != 0) {
                f.all_buffers.appendAssumeCapacity(.{
                    .iov_base = buf.ptr,
                    .iov_len = buf.len,
                });
                f.file_size += buf.len;
            }
        }
    }

    const file = self.base.file.?;
    // Set the final size first, then write all buffers in one gather write.
    try file.setEndPos(f.file_size);
    try file.pwritevAll(f.all_buffers.items, 0);
}
321
/// Transient state for a single `flushModule` call.
const Flush = struct {
    /// Decls not yet emitted; popped and recursively drained by `flushDecl`.
    remaining_decls: std.AutoArrayHashMapUnmanaged(*const Module.Decl, void) = .{},
    /// "Already emitted" set so each typedef appears at most once in the output.
    typedefs: Typedefs = .{},
    /// Accumulates error defines and deduplicated typedef text.
    err_typedef_buf: std.ArrayListUnmanaged(u8) = .{},
    /// We collect a list of buffers to write, and write them all at once with pwritev.
    all_buffers: std.ArrayListUnmanaged(std.os.iovec_const) = .{},
    /// Keeps track of the total bytes of `all_buffers`.
    file_size: u64 = 0,
    /// Count of function Decls seen by `flushDecl`; used by `flushModule`
    /// to reserve exactly enough iovec slots for function bodies.
    fn_count: usize = 0,

    const Typedefs = std.HashMapUnmanaged(
        Type,
        void,
        Type.HashContext64,
        std.hash_map.default_max_load_percentage,
    );

    fn deinit(f: *Flush, gpa: Allocator) void {
        f.all_buffers.deinit(gpa);
        f.err_typedef_buf.deinit(gpa);
        f.typedefs.deinit(gpa);
        f.remaining_decls.deinit(gpa);
    }
};
346
/// Error set for `flushDecl`: buffer bookkeeping is its only fallible
/// operation, since all C text was already rendered during update*().
const FlushDeclError = error{
    OutOfMemory,
};
350
/// Assumes `decl` was in the `remaining_decls` set, and has already been removed.
/// Recursively flushes `decl`'s dependencies first, then appends this Decl's
/// not-yet-seen typedefs, its forward declaration, and — for non-function
/// Decls — its code buffer. Function bodies are only counted (`fn_count`);
/// `flushModule` appends them after everything else.
fn flushDecl(self: *C, f: *Flush, decl: *const Module.Decl) FlushDeclError!void {
    // Before flushing any particular Decl we must ensure its
    // dependencies are already flushed, so that the order in the .c
    // file comes out correctly.
    for (decl.dependencies.keys()) |dep| {
        if (f.remaining_decls.swapRemove(dep)) {
            try flushDecl(self, f, dep);
        }
    }

    const decl_block = self.decl_table.getPtr(decl).?;
    const gpa = self.base.allocator;

    if (decl_block.typedefs.count() != 0) {
        try f.typedefs.ensureUnusedCapacity(gpa, @intCast(u32, decl_block.typedefs.count()));
        var it = decl_block.typedefs.iterator();
        while (it.next()) |new| {
            // `f.typedefs` is a "seen" set: emit each typedef's rendered
            // text only the first time its Type is encountered.
            const gop = f.typedefs.getOrPutAssumeCapacity(new.key_ptr.*);
            if (!gop.found_existing) {
                try f.err_typedef_buf.appendSlice(gpa, new.value_ptr.rendered);
            }
        }
    }

    if (decl_block.fwd_decl.items.len != 0) {
        const buf = decl_block.fwd_decl.items;
        try f.all_buffers.append(gpa, .{
            .iov_base = buf.ptr,
            .iov_len = buf.len,
        });
        f.file_size += buf.len;
    }
    if (decl.getFunction() != null) {
        // Function bodies come last in the file; just tally them here.
        f.fn_count += 1;
    } else if (decl_block.code.items.len != 0) {
        const buf = decl_block.code.items;
        try f.all_buffers.append(gpa, .{
            .iov_base = buf.ptr,
            .iov_len = buf.len,
        });
        f.file_size += buf.len;
    }
}
395
/// Writes the -femit-h header file: zig.h followed by the forward
/// declaration of every Decl in the emit-h table. Returns immediately if
/// emit-h was not requested for this module.
pub fn flushEmitH(module: *Module) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const emit_h = module.emit_h orelse return;

    // We collect a list of buffers to write, and write them all at once with pwritev.
    const num_buffers = emit_h.decl_table.count() + 1;
    var all_buffers = try std.ArrayList(std.os.iovec_const).initCapacity(module.gpa, num_buffers);
    defer all_buffers.deinit();

    var file_size: u64 = zig_h.len;
    all_buffers.appendAssumeCapacity(.{
        .iov_base = zig_h,
        .iov_len = zig_h.len,
    });

    for (emit_h.decl_table.keys()) |decl| {
        const decl_emit_h = decl.getEmitH(module);
        const buf = decl_emit_h.fwd_decl.items;
        all_buffers.appendAssumeCapacity(.{
            .iov_base = buf.ptr,
            .iov_len = buf.len,
        });
        file_size += buf.len;
    }

    const directory = emit_h.loc.directory orelse module.comp.local_cache_directory;
    const file = try directory.handle.createFile(emit_h.loc.basename, .{
        // We set the end position explicitly below; by not truncating the file, we possibly
        // make it easier on the file system by doing 1 reallocation instead of two.
        .truncate = false,
    });
    defer file.close();

    try file.setEndPos(file_size);
    try file.pwritevAll(all_buffers.items, 0);
}
434
/// Intentional no-op for the C backend: all output is produced by
/// updateDecl/updateFunc and assembled in flush; export updates require
/// no separate bookkeeping here.
pub fn updateDeclExports(
    self: *C,
    module: *Module,
    decl: *Module.Decl,
    exports: []const *Module.Export,
) !void {
    _ = self;
    _ = module;
    _ = decl;
    _ = exports;
}
446