//! Semantic analysis of ZIR instructions.
//! Shared among all Blocks; stored on the stack.
//! State used for compiling a ZIR into AIR.
//! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions.
//! Does type checking, comptime control flow, and safety-check generation.
//! This is the heart of the Zig compiler.

mod: *Module,
/// Alias to `mod.gpa`.
gpa: Allocator,
/// Points to the temporary arena allocator of the Sema.
/// This arena will be cleared when the sema is destroyed.
arena: Allocator,
/// Points to the arena allocator for the owner_decl.
/// This arena will persist until the decl is invalidated.
perm_arena: Allocator,
code: Zir,
air_instructions: std.MultiArrayList(Air.Inst) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .{},
air_values: std.ArrayListUnmanaged(Value) = .{},
/// Maps ZIR to AIR.
inst_map: InstMap = .{},
/// When analyzing an inline function call, owner_decl is the Decl of the caller
/// and `src_decl` of `Block` is the `Decl` of the callee.
/// This `Decl` owns the arena memory of this `Sema`.
owner_decl: *Decl,
/// For an inline or comptime function call, this will be the root parent function
/// which contains the callsite. Corresponds to `owner_decl`.
owner_func: ?*Module.Fn,
/// The function this ZIR code is the body of, according to the source code.
/// This starts out the same as `owner_func` and then diverges in the case of
/// an inline or comptime function call.
func: ?*Module.Fn,
/// When semantic analysis needs to know the return type of the function whose body
/// is being analyzed, this `Type` should be used instead of going through `func`.
/// This will correctly handle the case of a comptime/inline function call of a
/// generic function which uses a type expression for the return type.
/// The type will be `void` in the case that `func` is `null`.
fn_ret_ty: Type,
branch_quota: u32 = 1000,
branch_count: u32 = 0,
/// This field is updated when a new source location becomes active, so that
/// instructions which do not have explicitly mapped source locations still have
/// access to the source location set by the previous instruction which did
/// contain a mapped source location.
src: LazySrcLoc = .{ .token_offset = 0 },
decl_val_table: std.AutoHashMapUnmanaged(*Decl, Air.Inst.Ref) = .{},
/// When doing a generic function instantiation, this array collects a
/// `Value` object for each parameter that is comptime-known and thus elided
/// from the generated function. This memory is allocated by a parent `Sema` and
/// owned by the values arena of the Sema owner_decl.
comptime_args: []TypedValue = &.{},
/// Marks the function instruction that `comptime_args` applies to so that we
/// don't accidentally apply it to a function prototype which is used in the
/// type expression of a generic function parameter.
comptime_args_fn_inst: Zir.Inst.Index = 0,
/// When `comptime_args` is provided, this field is also provided. It was used as
/// the key in the `monomorphed_funcs` set. The `func` instruction is supposed
/// to use this instead of allocating a fresh one. This avoids an unnecessary
/// extra hash table lookup in the `monomorphed_funcs` set.
/// Sema will set this to null when it takes ownership.
preallocated_new_func: ?*Module.Fn = null,

const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.sema);

const Sema = @This();
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Air = @import("Air.zig");
const Zir = @import("Zir.zig");
const Module = @import("Module.zig");
const trace = @import("tracy.zig").trace;
const Namespace = Module.Namespace;
const CompileError = Module.CompileError;
const SemaError = Module.SemaError;
const Decl = Module.Decl;
const CaptureScope = Module.CaptureScope;
const WipCaptureScope = Module.WipCaptureScope;
const LazySrcLoc = Module.LazySrcLoc;
const RangeSet = @import("RangeSet.zig");
const target_util = @import("target.zig");
const Package = @import("Package.zig");
const crash_report = @import("crash_report.zig");

pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref);

/// This is the context needed to semantically analyze ZIR instructions and
/// produce AIR instructions.
/// This is a temporary structure stored on the stack; references to it are valid only
/// during semantic analysis of the block.
pub const Block = struct {
    parent: ?*Block,
    /// Shared among all child blocks.
    sema: *Sema,
    /// This Decl is the Decl according to the Zig source code corresponding to this Block.
    /// This can vary during inline or comptime function calls. See `Sema.owner_decl`
    /// for the one that will be the same for all Block instances.
    src_decl: *Decl,
    /// The namespace to use for lookups from this source block.
    /// When analyzing fields, this is different from src_decl.src_namespace.
    namespace: *Namespace,
    /// The AIR instructions generated for this block.
    instructions: std.ArrayListUnmanaged(Air.Inst.Index),
    // `param` instructions are collected here to be used by the `func` instruction.
    params: std.ArrayListUnmanaged(Param) = .{},

    wip_capture_scope: *CaptureScope,

    label: ?*Label = null,
    inlining: ?*Inlining,
    /// If runtime_index is not 0, then one of these is guaranteed to be non-null.
    runtime_cond: ?LazySrcLoc = null,
    runtime_loop: ?LazySrcLoc = null,
    /// Non-zero if a non-inline loop or a runtime conditional has been encountered.
    /// Stores to comptime variables are only allowed when var.runtime_index <= runtime_index.
    runtime_index: u32 = 0,

    is_comptime: bool,

    /// When null, safety is determined by the build mode; changed by `@setRuntimeSafety`.
    want_safety: ?bool = null,

    c_import_buf: ?*std.ArrayList(u8) = null,

    const Param = struct {
        /// `noreturn` means `anytype`.
        ty: Type,
        is_comptime: bool,
    };

    /// Maps a block ZIR instruction to the corresponding
    /// AIR instruction, for break instruction analysis.
    pub const Label = struct {
        zir_block: Zir.Inst.Index,
        merges: Merges,
    };

    /// Indicates that an inline function call is happening
    /// and that return instructions should be analyzed as break instructions
    /// to this AIR block instruction.
    /// It is shared among all the blocks in an inline or comptime-called
    /// function.
148 pub const Inlining = struct { 149 comptime_result: Air.Inst.Ref, 150 merges: Merges, 151 }; 152 153 pub const Merges = struct { 154 block_inst: Air.Inst.Index, 155 /// Separate array list from break_inst_list so that it can be passed directly 156 /// to resolvePeerTypes. 157 results: std.ArrayListUnmanaged(Air.Inst.Ref), 158 /// Keeps track of the break instructions so that the operand can be replaced 159 /// if we need to add type coercion at the end of block analysis. 160 /// Same indexes, capacity, length as `results`. 161 br_list: std.ArrayListUnmanaged(Air.Inst.Index), 162 }; 163 164 /// For debugging purposes. 165 pub fn dump(block: *Block, mod: Module) void { 166 Zir.dumpBlock(mod, block); 167 } 168 169 pub fn makeSubBlock(parent: *Block) Block { 170 return .{ 171 .parent = parent, 172 .sema = parent.sema, 173 .src_decl = parent.src_decl, 174 .namespace = parent.namespace, 175 .instructions = .{}, 176 .wip_capture_scope = parent.wip_capture_scope, 177 .label = null, 178 .inlining = parent.inlining, 179 .is_comptime = parent.is_comptime, 180 .runtime_cond = parent.runtime_cond, 181 .runtime_loop = parent.runtime_loop, 182 .runtime_index = parent.runtime_index, 183 .want_safety = parent.want_safety, 184 .c_import_buf = parent.c_import_buf, 185 }; 186 } 187 188 pub fn wantSafety(block: *const Block) bool { 189 return block.want_safety orelse switch (block.sema.mod.optimizeMode()) { 190 .Debug => true, 191 .ReleaseSafe => true, 192 .ReleaseFast => false, 193 .ReleaseSmall => false, 194 }; 195 } 196 197 pub fn getFileScope(block: *Block) *Module.File { 198 return block.namespace.file_scope; 199 } 200 201 pub fn addTy( 202 block: *Block, 203 tag: Air.Inst.Tag, 204 ty: Type, 205 ) error{OutOfMemory}!Air.Inst.Ref { 206 return block.addInst(.{ 207 .tag = tag, 208 .data = .{ .ty = ty }, 209 }); 210 } 211 212 pub fn addTyOp( 213 block: *Block, 214 tag: Air.Inst.Tag, 215 ty: Type, 216 operand: Air.Inst.Ref, 217 ) error{OutOfMemory}!Air.Inst.Ref { 218 return block.addInst(.{ 219 .tag = tag, 220 .data = .{ .ty_op = .{ 221 .ty = try block.sema.addType(ty), 222 .operand = operand, 223 } }, 224 }); 225 } 226 227 pub fn addBitCast(block: *Block, ty: Type, operand: Air.Inst.Ref) Allocator.Error!Air.Inst.Ref { 228 return block.addInst(.{ 229 .tag = .bitcast, 230 .data = .{ .ty_op = .{ 231 .ty = try block.sema.addType(ty), 232 .operand = operand, 233 } }, 234 }); 235 } 236 237 pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref { 238 return block.addInst(.{ 239 .tag = tag, 240 .data = .{ .no_op = {} }, 241 }); 242 } 243 244 pub fn addUnOp( 245 block: *Block, 246 tag: Air.Inst.Tag, 247 operand: Air.Inst.Ref, 248 ) error{OutOfMemory}!Air.Inst.Ref { 249 return block.addInst(.{ 250 .tag = tag, 251 .data = .{ .un_op = operand }, 252 }); 253 } 254 255 pub fn addBr( 256 block: *Block, 257 target_block: Air.Inst.Index, 258 operand: Air.Inst.Ref, 259 ) error{OutOfMemory}!Air.Inst.Ref { 260 return block.addInst(.{ 261 .tag = .br, 262 .data = .{ .br = .{ 263 .block_inst = target_block, 264 .operand = operand, 265 } }, 266 }); 267 } 268 269 pub fn addBinOp( 270 block: *Block, 271 tag: Air.Inst.Tag, 272 lhs: Air.Inst.Ref, 273 rhs: Air.Inst.Ref, 274 ) error{OutOfMemory}!Air.Inst.Ref { 275 return block.addInst(.{ 276 .tag = tag, 277 .data = .{ .bin_op = .{ 278 .lhs = lhs, 279 .rhs = rhs, 280 } }, 281 }); 282 } 283 284 pub fn addArg(block: *Block, ty: Type, name: u32) error{OutOfMemory}!Air.Inst.Ref { 285 return block.addInst(.{ 286 .tag = .arg, 287 .data = .{ .ty_str = .{ 288 .ty = try 
block.sema.addType(ty), 289 .str = name, 290 } }, 291 }); 292 } 293 294 pub fn addStructFieldPtr( 295 block: *Block, 296 struct_ptr: Air.Inst.Ref, 297 field_index: u32, 298 ptr_field_ty: Type, 299 ) !Air.Inst.Ref { 300 const ty = try block.sema.addType(ptr_field_ty); 301 const tag: Air.Inst.Tag = switch (field_index) { 302 0 => .struct_field_ptr_index_0, 303 1 => .struct_field_ptr_index_1, 304 2 => .struct_field_ptr_index_2, 305 3 => .struct_field_ptr_index_3, 306 else => { 307 return block.addInst(.{ 308 .tag = .struct_field_ptr, 309 .data = .{ .ty_pl = .{ 310 .ty = ty, 311 .payload = try block.sema.addExtra(Air.StructField{ 312 .struct_operand = struct_ptr, 313 .field_index = field_index, 314 }), 315 } }, 316 }); 317 }, 318 }; 319 return block.addInst(.{ 320 .tag = tag, 321 .data = .{ .ty_op = .{ 322 .ty = ty, 323 .operand = struct_ptr, 324 } }, 325 }); 326 } 327 328 pub fn addStructFieldVal( 329 block: *Block, 330 struct_val: Air.Inst.Ref, 331 field_index: u32, 332 field_ty: Type, 333 ) !Air.Inst.Ref { 334 return block.addInst(.{ 335 .tag = .struct_field_val, 336 .data = .{ .ty_pl = .{ 337 .ty = try block.sema.addType(field_ty), 338 .payload = try block.sema.addExtra(Air.StructField{ 339 .struct_operand = struct_val, 340 .field_index = field_index, 341 }), 342 } }, 343 }); 344 } 345 346 pub fn addSliceElemPtr( 347 block: *Block, 348 slice: Air.Inst.Ref, 349 elem_index: Air.Inst.Ref, 350 elem_ptr_ty: Type, 351 ) !Air.Inst.Ref { 352 return block.addInst(.{ 353 .tag = .slice_elem_ptr, 354 .data = .{ .ty_pl = .{ 355 .ty = try block.sema.addType(elem_ptr_ty), 356 .payload = try block.sema.addExtra(Air.Bin{ 357 .lhs = slice, 358 .rhs = elem_index, 359 }), 360 } }, 361 }); 362 } 363 364 pub fn addPtrElemPtr( 365 block: *Block, 366 array_ptr: Air.Inst.Ref, 367 elem_index: Air.Inst.Ref, 368 elem_ptr_ty: Type, 369 ) !Air.Inst.Ref { 370 return block.addInst(.{ 371 .tag = .ptr_elem_ptr, 372 .data = .{ .ty_pl = .{ 373 .ty = try block.sema.addType(elem_ptr_ty), 374 .payload = try block.sema.addExtra(Air.Bin{ 375 .lhs = array_ptr, 376 .rhs = elem_index, 377 }), 378 } }, 379 }); 380 } 381 382 pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref { 383 return Air.indexToRef(try block.addInstAsIndex(inst)); 384 } 385 386 pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index { 387 const sema = block.sema; 388 const gpa = sema.gpa; 389 390 try sema.air_instructions.ensureUnusedCapacity(gpa, 1); 391 try block.instructions.ensureUnusedCapacity(gpa, 1); 392 393 const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); 394 sema.air_instructions.appendAssumeCapacity(inst); 395 block.instructions.appendAssumeCapacity(result_index); 396 return result_index; 397 } 398 399 fn addUnreachable(block: *Block, src: LazySrcLoc, safety_check: bool) !void { 400 if (safety_check and block.wantSafety()) { 401 _ = try block.sema.safetyPanic(block, src, .unreach); 402 } else { 403 _ = try block.addNoOp(.unreach); 404 } 405 } 406 407 pub fn startAnonDecl(block: *Block) !WipAnonDecl { 408 return WipAnonDecl{ 409 .block = block, 410 .new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa), 411 .finished = false, 412 }; 413 } 414 415 pub const WipAnonDecl = struct { 416 block: *Block, 417 new_decl_arena: std.heap.ArenaAllocator, 418 finished: bool, 419 420 pub fn arena(wad: *WipAnonDecl) Allocator { 421 return wad.new_decl_arena.allocator(); 422 } 423 424 pub fn deinit(wad: *WipAnonDecl) void { 425 if (!wad.finished) { 426 wad.new_decl_arena.deinit(); 
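                // (Once `finish` has run, this arena's memory has been handed off
                // to the new Decl via `finalizeNewArena`, so it must not be freed
                // again here.)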
427 } 428 wad.* = undefined; 429 } 430 431 pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value) !*Decl { 432 const new_decl = try wad.block.sema.mod.createAnonymousDecl(wad.block, .{ 433 .ty = ty, 434 .val = val, 435 }); 436 errdefer wad.block.sema.mod.abortAnonDecl(new_decl); 437 try new_decl.finalizeNewArena(&wad.new_decl_arena); 438 wad.finished = true; 439 return new_decl; 440 } 441 }; 442}; 443 444pub fn deinit(sema: *Sema) void { 445 const gpa = sema.gpa; 446 sema.air_instructions.deinit(gpa); 447 sema.air_extra.deinit(gpa); 448 sema.air_values.deinit(gpa); 449 sema.inst_map.deinit(gpa); 450 sema.decl_val_table.deinit(gpa); 451 sema.* = undefined; 452} 453 454/// Returns only the result from the body that is specified. 455/// Only appropriate to call when it is determined at comptime that this body 456/// has no peers. 457fn resolveBody(sema: *Sema, block: *Block, body: []const Zir.Inst.Index) CompileError!Air.Inst.Ref { 458 const break_inst = try sema.analyzeBody(block, body); 459 const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; 460 return sema.resolveInst(operand_ref); 461} 462 463/// ZIR instructions which are always `noreturn` return this. This matches the 464/// return type of `analyzeBody` so that we can tail call them. 465/// Only appropriate to return when the instruction is known to be NoReturn 466/// solely based on the ZIR tag. 467const always_noreturn: CompileError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined); 468 469/// This function is the main loop of `Sema` and it can be used in two different ways: 470/// * The traditional way where there are N breaks out of the block and peer type 471/// resolution is done on the break operands. In this case, the `Zir.Inst.Index` 472/// part of the return value will be `undefined`, and callsites should ignore it, 473/// finding the block result value via the block scope. 474/// * The "flat" way. There is only 1 break out of the block, and it is with a `break_inline` 475/// instruction. In this case, the `Zir.Inst.Index` part of the return value will be 476/// the break instruction. This communicates both which block the break applies to, as 477/// well as the operand. No block scope needs to be created for this strategy. 478pub fn analyzeBody( 479 sema: *Sema, 480 block: *Block, 481 body: []const Zir.Inst.Index, 482) CompileError!Zir.Inst.Index { 483 // No tracy calls here, to avoid interfering with the tail call mechanism. 484 485 const parent_capture_scope = block.wip_capture_scope; 486 487 var wip_captures = WipCaptureScope{ 488 .finalized = true, 489 .scope = parent_capture_scope, 490 .perm_arena = sema.perm_arena, 491 .gpa = sema.gpa, 492 }; 493 defer if (wip_captures.scope != parent_capture_scope) { 494 wip_captures.deinit(); 495 }; 496 497 const map = &sema.inst_map; 498 const tags = sema.code.instructions.items(.tag); 499 const datas = sema.code.instructions.items(.data); 500 501 var orig_captures: usize = parent_capture_scope.captures.count(); 502 503 var crash_info = crash_report.prepAnalyzeBody(sema, block, body); 504 crash_info.push(); 505 defer crash_info.pop(); 506 507 // We use a while(true) loop here to avoid a redundant way of breaking out of 508 // the loop. The only way to break out of the loop is with a `noreturn` 509 // instruction. 
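    // As an illustrative sketch of the "flat" strategy described in the doc
    // comment above (mirroring `resolveBody`), a caller consumes the returned
    // `break_inline` index roughly like this:
    //
    //     const break_inst = try sema.analyzeBody(block, body);
    //     const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand;
    //     const result = sema.resolveInst(operand_ref);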
510 var i: usize = 0; 511 const result = while (true) { 512 crash_info.setBodyIndex(i); 513 const inst = body[i]; 514 const air_inst: Air.Inst.Ref = switch (tags[inst]) { 515 // zig fmt: off 516 .alloc => try sema.zirAlloc(block, inst), 517 .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), 518 .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), 519 .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst), 520 .alloc_mut => try sema.zirAllocMut(block, inst), 521 .alloc_comptime => try sema.zirAllocComptime(block, inst), 522 .anyframe_type => try sema.zirAnyframeType(block, inst), 523 .array_cat => try sema.zirArrayCat(block, inst), 524 .array_mul => try sema.zirArrayMul(block, inst), 525 .array_type => try sema.zirArrayType(block, inst), 526 .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), 527 .vector_type => try sema.zirVectorType(block, inst), 528 .as => try sema.zirAs(block, inst), 529 .as_node => try sema.zirAsNode(block, inst), 530 .bit_and => try sema.zirBitwise(block, inst, .bit_and), 531 .bit_not => try sema.zirBitNot(block, inst), 532 .bit_or => try sema.zirBitwise(block, inst, .bit_or), 533 .bitcast => try sema.zirBitcast(block, inst), 534 .suspend_block => try sema.zirSuspendBlock(block, inst), 535 .bool_not => try sema.zirBoolNot(block, inst), 536 .bool_br_and => try sema.zirBoolBr(block, inst, false), 537 .bool_br_or => try sema.zirBoolBr(block, inst, true), 538 .c_import => try sema.zirCImport(block, inst), 539 .call => try sema.zirCall(block, inst), 540 .closure_get => try sema.zirClosureGet(block, inst), 541 .cmp_lt => try sema.zirCmp(block, inst, .lt), 542 .cmp_lte => try sema.zirCmp(block, inst, .lte), 543 .cmp_eq => try sema.zirCmpEq(block, inst, .eq, .cmp_eq), 544 .cmp_gte => try sema.zirCmp(block, inst, .gte), 545 .cmp_gt => try sema.zirCmp(block, inst, .gt), 546 .cmp_neq => try sema.zirCmpEq(block, inst, .neq, .cmp_neq), 547 .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), 548 .decl_ref => try sema.zirDeclRef(block, inst), 549 .decl_val => try sema.zirDeclVal(block, inst), 550 .load => try sema.zirLoad(block, inst), 551 .elem_ptr => try sema.zirElemPtr(block, inst), 552 .elem_ptr_node => try sema.zirElemPtrNode(block, inst), 553 .elem_ptr_imm => try sema.zirElemPtrImm(block, inst), 554 .elem_val => try sema.zirElemVal(block, inst), 555 .elem_val_node => try sema.zirElemValNode(block, inst), 556 .elem_type => try sema.zirElemType(block, inst), 557 .enum_literal => try sema.zirEnumLiteral(block, inst), 558 .enum_to_int => try sema.zirEnumToInt(block, inst), 559 .int_to_enum => try sema.zirIntToEnum(block, inst), 560 .err_union_code => try sema.zirErrUnionCode(block, inst), 561 .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), 562 .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), 563 .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), 564 .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), 565 .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), 566 .error_union_type => try sema.zirErrorUnionType(block, inst), 567 .error_value => try sema.zirErrorValue(block, inst), 568 .error_to_int => try sema.zirErrorToInt(block, inst), 569 .int_to_error => try sema.zirIntToError(block, inst), 570 .field_ptr => try sema.zirFieldPtr(block, inst), 571 .field_ptr_named => try sema.zirFieldPtrNamed(block, inst), 572 .field_val => 
try sema.zirFieldVal(block, inst), 573 .field_val_named => try sema.zirFieldValNamed(block, inst), 574 .field_call_bind => try sema.zirFieldCallBind(block, inst), 575 .field_call_bind_named => try sema.zirFieldCallBindNamed(block, inst), 576 .func => try sema.zirFunc(block, inst, false), 577 .func_inferred => try sema.zirFunc(block, inst, true), 578 .import => try sema.zirImport(block, inst), 579 .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), 580 .int => try sema.zirInt(block, inst), 581 .int_big => try sema.zirIntBig(block, inst), 582 .float => try sema.zirFloat(block, inst), 583 .float128 => try sema.zirFloat128(block, inst), 584 .int_type => try sema.zirIntType(block, inst), 585 .is_non_err => try sema.zirIsNonErr(block, inst), 586 .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), 587 .is_non_null => try sema.zirIsNonNull(block, inst), 588 .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), 589 .merge_error_sets => try sema.zirMergeErrorSets(block, inst), 590 .negate => try sema.zirNegate(block, inst, .sub), 591 .negate_wrap => try sema.zirNegate(block, inst, .subwrap), 592 .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), 593 .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), 594 .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), 595 .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), 596 .optional_type => try sema.zirOptionalType(block, inst), 597 .ptr_type => try sema.zirPtrType(block, inst), 598 .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), 599 .ref => try sema.zirRef(block, inst), 600 .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), 601 .shr => try sema.zirShr(block, inst), 602 .slice_end => try sema.zirSliceEnd(block, inst), 603 .slice_sentinel => try sema.zirSliceSentinel(block, inst), 604 .slice_start => try sema.zirSliceStart(block, inst), 605 .str => try sema.zirStr(block, inst), 606 .switch_block => try sema.zirSwitchBlock(block, inst), 607 .switch_cond => try sema.zirSwitchCond(block, inst, false), 608 .switch_cond_ref => try sema.zirSwitchCond(block, inst, true), 609 .switch_capture => try sema.zirSwitchCapture(block, inst, false, false), 610 .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), 611 .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), 612 .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), 613 .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), 614 .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), 615 .type_info => try sema.zirTypeInfo(block, inst), 616 .size_of => try sema.zirSizeOf(block, inst), 617 .bit_size_of => try sema.zirBitSizeOf(block, inst), 618 .typeof => try sema.zirTypeof(block, inst), 619 .log2_int_type => try sema.zirLog2IntType(block, inst), 620 .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), 621 .xor => try sema.zirBitwise(block, inst, .xor), 622 .struct_init_empty => try sema.zirStructInitEmpty(block, inst), 623 .struct_init => try sema.zirStructInit(block, inst, false), 624 .struct_init_ref => try sema.zirStructInit(block, inst, true), 625 .struct_init_anon => try sema.zirStructInitAnon(block, inst, false), 626 .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), 627 .array_init => try sema.zirArrayInit(block, inst, false), 628 .array_init_ref => try sema.zirArrayInit(block, inst, true), 629 
.array_init_anon => try sema.zirArrayInitAnon(block, inst, false), 630 .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), 631 .union_init_ptr => try sema.zirUnionInitPtr(block, inst), 632 .field_type => try sema.zirFieldType(block, inst), 633 .field_type_ref => try sema.zirFieldTypeRef(block, inst), 634 .ptr_to_int => try sema.zirPtrToInt(block, inst), 635 .align_of => try sema.zirAlignOf(block, inst), 636 .bool_to_int => try sema.zirBoolToInt(block, inst), 637 .embed_file => try sema.zirEmbedFile(block, inst), 638 .error_name => try sema.zirErrorName(block, inst), 639 .tag_name => try sema.zirTagName(block, inst), 640 .reify => try sema.zirReify(block, inst), 641 .type_name => try sema.zirTypeName(block, inst), 642 .frame_type => try sema.zirFrameType(block, inst), 643 .frame_size => try sema.zirFrameSize(block, inst), 644 .float_to_int => try sema.zirFloatToInt(block, inst), 645 .int_to_float => try sema.zirIntToFloat(block, inst), 646 .int_to_ptr => try sema.zirIntToPtr(block, inst), 647 .float_cast => try sema.zirFloatCast(block, inst), 648 .int_cast => try sema.zirIntCast(block, inst), 649 .err_set_cast => try sema.zirErrSetCast(block, inst), 650 .ptr_cast => try sema.zirPtrCast(block, inst), 651 .truncate => try sema.zirTruncate(block, inst), 652 .align_cast => try sema.zirAlignCast(block, inst), 653 .has_decl => try sema.zirHasDecl(block, inst), 654 .has_field => try sema.zirHasField(block, inst), 655 .clz => try sema.zirClz(block, inst), 656 .ctz => try sema.zirCtz(block, inst), 657 .pop_count => try sema.zirPopCount(block, inst), 658 .byte_swap => try sema.zirByteSwap(block, inst), 659 .bit_reverse => try sema.zirBitReverse(block, inst), 660 .shr_exact => try sema.zirShrExact(block, inst), 661 .bit_offset_of => try sema.zirBitOffsetOf(block, inst), 662 .offset_of => try sema.zirOffsetOf(block, inst), 663 .cmpxchg_strong => try sema.zirCmpxchg(block, inst, .cmpxchg_strong), 664 .cmpxchg_weak => try sema.zirCmpxchg(block, inst, .cmpxchg_weak), 665 .splat => try sema.zirSplat(block, inst), 666 .reduce => try sema.zirReduce(block, inst), 667 .shuffle => try sema.zirShuffle(block, inst), 668 .select => try sema.zirSelect(block, inst), 669 .atomic_load => try sema.zirAtomicLoad(block, inst), 670 .atomic_rmw => try sema.zirAtomicRmw(block, inst), 671 .mul_add => try sema.zirMulAdd(block, inst), 672 .builtin_call => try sema.zirBuiltinCall(block, inst), 673 .field_ptr_type => try sema.zirFieldPtrType(block, inst), 674 .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), 675 .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), 676 .@"resume" => try sema.zirResume(block, inst), 677 .@"await" => try sema.zirAwait(block, inst, false), 678 .await_nosuspend => try sema.zirAwait(block, inst, true), 679 .extended => try sema.zirExtended(block, inst), 680 681 .sqrt => try sema.zirUnaryMath(block, inst), 682 .sin => try sema.zirUnaryMath(block, inst), 683 .cos => try sema.zirUnaryMath(block, inst), 684 .exp => try sema.zirUnaryMath(block, inst), 685 .exp2 => try sema.zirUnaryMath(block, inst), 686 .log => try sema.zirUnaryMath(block, inst), 687 .log2 => try sema.zirUnaryMath(block, inst), 688 .log10 => try sema.zirUnaryMath(block, inst), 689 .fabs => try sema.zirUnaryMath(block, inst), 690 .floor => try sema.zirUnaryMath(block, inst), 691 .ceil => try sema.zirUnaryMath(block, inst), 692 .trunc => try sema.zirUnaryMath(block, inst), 693 .round => try sema.zirUnaryMath(block, inst), 694 695 .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), 696 
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), 697 .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), 698 699 .add => try sema.zirArithmetic(block, inst, .add), 700 .addwrap => try sema.zirArithmetic(block, inst, .addwrap), 701 .add_sat => try sema.zirArithmetic(block, inst, .add_sat), 702 .div => try sema.zirArithmetic(block, inst, .div), 703 .div_exact => try sema.zirArithmetic(block, inst, .div_exact), 704 .div_floor => try sema.zirArithmetic(block, inst, .div_floor), 705 .div_trunc => try sema.zirArithmetic(block, inst, .div_trunc), 706 .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem), 707 .mod => try sema.zirArithmetic(block, inst, .mod), 708 .rem => try sema.zirArithmetic(block, inst, .rem), 709 .mul => try sema.zirArithmetic(block, inst, .mul), 710 .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap), 711 .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat), 712 .sub => try sema.zirArithmetic(block, inst, .sub), 713 .subwrap => try sema.zirArithmetic(block, inst, .subwrap), 714 .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat), 715 716 .maximum => try sema.zirMinMax(block, inst, .max), 717 .minimum => try sema.zirMinMax(block, inst, .min), 718 719 .shl => try sema.zirShl(block, inst, .shl), 720 .shl_exact => try sema.zirShl(block, inst, .shl_exact), 721 .shl_sat => try sema.zirShl(block, inst, .shl_sat), 722 723 // Instructions that we know to *always* be noreturn based solely on their tag. 724 // These functions match the return type of analyzeBody so that we can 725 // tail call them here. 726 .compile_error => break sema.zirCompileError(block, inst), 727 .ret_coerce => break sema.zirRetCoerce(block, inst), 728 .ret_node => break sema.zirRetNode(block, inst), 729 .ret_load => break sema.zirRetLoad(block, inst), 730 .ret_err_value => break sema.zirRetErrValue(block, inst), 731 .@"unreachable" => break sema.zirUnreachable(block, inst), 732 .panic => break sema.zirPanic(block, inst), 733 // zig fmt: on 734 735 // Instructions that we know can *never* be noreturn based solely on 736 // their tag. We avoid needlessly checking if they are noreturn and 737 // continue the loop. 738 // We also know that they cannot be referenced later, so we avoid 739 // putting them into the map. 
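            // Each of these arms therefore has the same shape: perform the side
            // effect, then `i += 1; continue;` without recording anything in
            // `inst_map`.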
740 .breakpoint => { 741 if (!block.is_comptime) { 742 _ = try block.addNoOp(.breakpoint); 743 } 744 i += 1; 745 continue; 746 }, 747 .fence => { 748 try sema.zirFence(block, inst); 749 i += 1; 750 continue; 751 }, 752 .dbg_stmt => { 753 try sema.zirDbgStmt(block, inst); 754 i += 1; 755 continue; 756 }, 757 .ensure_err_payload_void => { 758 try sema.zirEnsureErrPayloadVoid(block, inst); 759 i += 1; 760 continue; 761 }, 762 .ensure_result_non_error => { 763 try sema.zirEnsureResultNonError(block, inst); 764 i += 1; 765 continue; 766 }, 767 .ensure_result_used => { 768 try sema.zirEnsureResultUsed(block, inst); 769 i += 1; 770 continue; 771 }, 772 .set_eval_branch_quota => { 773 try sema.zirSetEvalBranchQuota(block, inst); 774 i += 1; 775 continue; 776 }, 777 .atomic_store => { 778 try sema.zirAtomicStore(block, inst); 779 i += 1; 780 continue; 781 }, 782 .store => { 783 try sema.zirStore(block, inst); 784 i += 1; 785 continue; 786 }, 787 .store_node => { 788 try sema.zirStoreNode(block, inst); 789 i += 1; 790 continue; 791 }, 792 .store_to_block_ptr => { 793 try sema.zirStoreToBlockPtr(block, inst); 794 i += 1; 795 continue; 796 }, 797 .store_to_inferred_ptr => { 798 try sema.zirStoreToInferredPtr(block, inst); 799 i += 1; 800 continue; 801 }, 802 .resolve_inferred_alloc => { 803 try sema.zirResolveInferredAlloc(block, inst); 804 i += 1; 805 continue; 806 }, 807 .validate_struct_init => { 808 try sema.zirValidateStructInit(block, inst); 809 i += 1; 810 continue; 811 }, 812 .validate_array_init => { 813 try sema.zirValidateArrayInit(block, inst); 814 i += 1; 815 continue; 816 }, 817 .@"export" => { 818 try sema.zirExport(block, inst); 819 i += 1; 820 continue; 821 }, 822 .export_value => { 823 try sema.zirExportValue(block, inst); 824 i += 1; 825 continue; 826 }, 827 .set_align_stack => { 828 try sema.zirSetAlignStack(block, inst); 829 i += 1; 830 continue; 831 }, 832 .set_cold => { 833 try sema.zirSetCold(block, inst); 834 i += 1; 835 continue; 836 }, 837 .set_float_mode => { 838 try sema.zirSetFloatMode(block, inst); 839 i += 1; 840 continue; 841 }, 842 .set_runtime_safety => { 843 try sema.zirSetRuntimeSafety(block, inst); 844 i += 1; 845 continue; 846 }, 847 .param => { 848 try sema.zirParam(block, inst, false); 849 i += 1; 850 continue; 851 }, 852 .param_comptime => { 853 try sema.zirParam(block, inst, true); 854 i += 1; 855 continue; 856 }, 857 .param_anytype => { 858 try sema.zirParamAnytype(block, inst, false); 859 i += 1; 860 continue; 861 }, 862 .param_anytype_comptime => { 863 try sema.zirParamAnytype(block, inst, true); 864 i += 1; 865 continue; 866 }, 867 .closure_capture => { 868 try sema.zirClosureCapture(block, inst); 869 i += 1; 870 continue; 871 }, 872 .memcpy => { 873 try sema.zirMemcpy(block, inst); 874 i += 1; 875 continue; 876 }, 877 .memset => { 878 try sema.zirMemset(block, inst); 879 i += 1; 880 continue; 881 }, 882 883 // Special case instructions to handle comptime control flow. 884 .@"break" => { 885 if (block.is_comptime) { 886 break inst; // same as break_inline 887 } else { 888 break sema.zirBreak(block, inst); 889 } 890 }, 891 .break_inline => break inst, 892 .repeat => { 893 if (block.is_comptime) { 894 // Send comptime control flow back to the beginning of this block. 
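                    // Each backward branch is charged against `branch_quota`
                    // (default 1000; see the field at the top of this file), and
                    // `emitBackwardBranch` presumably reports a compile error once
                    // the quota is exhausted, which `.set_eval_branch_quota` above
                    // can raise.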
895 const src: LazySrcLoc = .{ .node_offset = datas[inst].node }; 896 try sema.emitBackwardBranch(block, src); 897 if (wip_captures.scope.captures.count() != orig_captures) { 898 try wip_captures.reset(parent_capture_scope); 899 block.wip_capture_scope = wip_captures.scope; 900 orig_captures = 0; 901 } 902 i = 0; 903 continue; 904 } else { 905 const src_node = sema.code.instructions.items(.data)[inst].node; 906 const src: LazySrcLoc = .{ .node_offset = src_node }; 907 try sema.requireRuntimeBlock(block, src); 908 break always_noreturn; 909 } 910 }, 911 .repeat_inline => { 912 // Send comptime control flow back to the beginning of this block. 913 const src: LazySrcLoc = .{ .node_offset = datas[inst].node }; 914 try sema.emitBackwardBranch(block, src); 915 if (wip_captures.scope.captures.count() != orig_captures) { 916 try wip_captures.reset(parent_capture_scope); 917 block.wip_capture_scope = wip_captures.scope; 918 orig_captures = 0; 919 } 920 i = 0; 921 continue; 922 }, 923 .loop => blk: { 924 if (!block.is_comptime) break :blk try sema.zirLoop(block, inst); 925 // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220 926 const inst_data = datas[inst].pl_node; 927 const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); 928 const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; 929 const break_inst = try sema.analyzeBody(block, inline_body); 930 const break_data = datas[break_inst].@"break"; 931 if (inst == break_data.block_inst) { 932 break :blk sema.resolveInst(break_data.operand); 933 } else { 934 break break_inst; 935 } 936 }, 937 .block => blk: { 938 if (!block.is_comptime) break :blk try sema.zirBlock(block, inst); 939 // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220 940 const inst_data = datas[inst].pl_node; 941 const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); 942 const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; 943 const break_inst = try sema.analyzeBody(block, inline_body); 944 const break_data = datas[break_inst].@"break"; 945 if (inst == break_data.block_inst) { 946 break :blk sema.resolveInst(break_data.operand); 947 } else { 948 break break_inst; 949 } 950 }, 951 .block_inline => blk: { 952 // Directly analyze the block body without introducing a new block. 953 const inst_data = datas[inst].pl_node; 954 const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); 955 const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; 956 const break_inst = try sema.analyzeBody(block, inline_body); 957 const break_data = datas[break_inst].@"break"; 958 if (inst == break_data.block_inst) { 959 break :blk sema.resolveInst(break_data.operand); 960 } else { 961 break break_inst; 962 } 963 }, 964 .condbr => blk: { 965 if (!block.is_comptime) break sema.zirCondbr(block, inst); 966 // Same as condbr_inline. 
TODO https://github.com/ziglang/zig/issues/8220 967 const inst_data = datas[inst].pl_node; 968 const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; 969 const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); 970 const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; 971 const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; 972 const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); 973 const inline_body = if (cond.val.toBool()) then_body else else_body; 974 const break_inst = try sema.analyzeBody(block, inline_body); 975 const break_data = datas[break_inst].@"break"; 976 if (inst == break_data.block_inst) { 977 break :blk sema.resolveInst(break_data.operand); 978 } else { 979 break break_inst; 980 } 981 }, 982 .condbr_inline => blk: { 983 const inst_data = datas[inst].pl_node; 984 const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; 985 const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); 986 const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; 987 const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; 988 const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); 989 const inline_body = if (cond.val.toBool()) then_body else else_body; 990 const break_inst = try sema.analyzeBody(block, inline_body); 991 const break_data = datas[break_inst].@"break"; 992 if (inst == break_data.block_inst) { 993 break :blk sema.resolveInst(break_data.operand); 994 } else { 995 break break_inst; 996 } 997 }, 998 }; 999 if (sema.typeOf(air_inst).isNoReturn()) 1000 break always_noreturn; 1001 try map.put(sema.gpa, inst, air_inst); 1002 i += 1; 1003 } else unreachable; 1004 1005 if (!wip_captures.finalized) { 1006 try wip_captures.finalize(); 1007 block.wip_capture_scope = parent_capture_scope; 1008 } 1009 1010 return result; 1011} 1012 1013fn zirExtended(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 1014 const extended = sema.code.instructions.items(.data)[inst].extended; 1015 switch (extended.opcode) { 1016 // zig fmt: off 1017 .func => return sema.zirFuncExtended( block, extended, inst), 1018 .variable => return sema.zirVarExtended( block, extended), 1019 .struct_decl => return sema.zirStructDecl( block, extended, inst), 1020 .enum_decl => return sema.zirEnumDecl( block, extended), 1021 .union_decl => return sema.zirUnionDecl( block, extended, inst), 1022 .opaque_decl => return sema.zirOpaqueDecl( block, extended), 1023 .ret_ptr => return sema.zirRetPtr( block, extended), 1024 .ret_type => return sema.zirRetType( block, extended), 1025 .this => return sema.zirThis( block, extended), 1026 .ret_addr => return sema.zirRetAddr( block, extended), 1027 .builtin_src => return sema.zirBuiltinSrc( block, extended), 1028 .error_return_trace => return sema.zirErrorReturnTrace( block, extended), 1029 .frame => return sema.zirFrame( block, extended), 1030 .frame_address => return sema.zirFrameAddress( block, extended), 1031 .alloc => return sema.zirAllocExtended( block, extended), 1032 .builtin_extern => return sema.zirBuiltinExtern( block, extended), 1033 .@"asm" => return sema.zirAsm( block, extended, inst), 1034 .typeof_peer => return sema.zirTypeofPeer( block, extended), 1035 .compile_log => return sema.zirCompileLog( block, extended), 1036 .add_with_overflow => return sema.zirOverflowArithmetic(block, extended), 1037 
.sub_with_overflow => return sema.zirOverflowArithmetic(block, extended), 1038 .mul_with_overflow => return sema.zirOverflowArithmetic(block, extended), 1039 .shl_with_overflow => return sema.zirOverflowArithmetic(block, extended), 1040 .c_undef => return sema.zirCUndef( block, extended), 1041 .c_include => return sema.zirCInclude( block, extended), 1042 .c_define => return sema.zirCDefine( block, extended), 1043 .wasm_memory_size => return sema.zirWasmMemorySize( block, extended), 1044 .wasm_memory_grow => return sema.zirWasmMemoryGrow( block, extended), 1045 .prefetch => return sema.zirPrefetch( block, extended), 1046 // zig fmt: on 1047 } 1048} 1049 1050pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref { 1051 var i: usize = @enumToInt(zir_ref); 1052 1053 // First section of indexes correspond to a set number of constant values. 1054 if (i < Zir.Inst.Ref.typed_value_map.len) { 1055 // We intentionally map the same indexes to the same values between ZIR and AIR. 1056 return zir_ref; 1057 } 1058 i -= Zir.Inst.Ref.typed_value_map.len; 1059 1060 // Finally, the last section of indexes refers to the map of ZIR=>AIR. 1061 return sema.inst_map.get(@intCast(u32, i)).?; 1062} 1063 1064fn resolveConstBool( 1065 sema: *Sema, 1066 block: *Block, 1067 src: LazySrcLoc, 1068 zir_ref: Zir.Inst.Ref, 1069) !bool { 1070 const air_inst = sema.resolveInst(zir_ref); 1071 const wanted_type = Type.initTag(.bool); 1072 const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); 1073 const val = try sema.resolveConstValue(block, src, coerced_inst); 1074 return val.toBool(); 1075} 1076 1077fn resolveConstString( 1078 sema: *Sema, 1079 block: *Block, 1080 src: LazySrcLoc, 1081 zir_ref: Zir.Inst.Ref, 1082) ![]u8 { 1083 const air_inst = sema.resolveInst(zir_ref); 1084 const wanted_type = Type.initTag(.const_slice_u8); 1085 const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); 1086 const val = try sema.resolveConstValue(block, src, coerced_inst); 1087 return val.toAllocatedBytes(wanted_type, sema.arena); 1088} 1089 1090pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { 1091 const air_inst = sema.resolveInst(zir_ref); 1092 const ty = try sema.analyzeAsType(block, src, air_inst); 1093 if (ty.tag() == .generic_poison) return error.GenericPoison; 1094 return ty; 1095} 1096 1097fn analyzeAsType( 1098 sema: *Sema, 1099 block: *Block, 1100 src: LazySrcLoc, 1101 air_inst: Air.Inst.Ref, 1102) !Type { 1103 const wanted_type = Type.initTag(.@"type"); 1104 const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); 1105 const val = try sema.resolveConstValue(block, src, coerced_inst); 1106 var buffer: Value.ToTypeBuffer = undefined; 1107 const ty = val.toType(&buffer); 1108 return ty.copy(sema.arena); 1109} 1110 1111/// May return Value Tags: `variable`, `undef`. 1112/// See `resolveConstValue` for an alternative. 1113/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. 1114fn resolveValue( 1115 sema: *Sema, 1116 block: *Block, 1117 src: LazySrcLoc, 1118 air_ref: Air.Inst.Ref, 1119) CompileError!Value { 1120 if (try sema.resolveMaybeUndefValAllowVariables(block, src, air_ref)) |val| { 1121 if (val.tag() == .generic_poison) return error.GenericPoison; 1122 return val; 1123 } 1124 return sema.failWithNeededComptime(block, src); 1125} 1126 1127/// Value Tag `variable` will cause a compile error. 1128/// Value Tag `undef` may be returned. 
1129fn resolveConstMaybeUndefVal( 1130 sema: *Sema, 1131 block: *Block, 1132 src: LazySrcLoc, 1133 inst: Air.Inst.Ref, 1134) CompileError!Value { 1135 if (try sema.resolveMaybeUndefValAllowVariables(block, src, inst)) |val| { 1136 switch (val.tag()) { 1137 .variable => return sema.failWithNeededComptime(block, src), 1138 .generic_poison => return error.GenericPoison, 1139 else => return val, 1140 } 1141 } 1142 return sema.failWithNeededComptime(block, src); 1143} 1144 1145/// Will not return Value Tags: `variable`, `undef`. Instead they will emit compile errors. 1146/// See `resolveValue` for an alternative. 1147fn resolveConstValue( 1148 sema: *Sema, 1149 block: *Block, 1150 src: LazySrcLoc, 1151 air_ref: Air.Inst.Ref, 1152) CompileError!Value { 1153 if (try sema.resolveMaybeUndefValAllowVariables(block, src, air_ref)) |val| { 1154 switch (val.tag()) { 1155 .undef => return sema.failWithUseOfUndef(block, src), 1156 .variable => return sema.failWithNeededComptime(block, src), 1157 .generic_poison => return error.GenericPoison, 1158 else => return val, 1159 } 1160 } 1161 return sema.failWithNeededComptime(block, src); 1162} 1163 1164/// Value Tag `variable` causes this function to return `null`. 1165/// Value Tag `undef` causes this function to return a compile error. 1166fn resolveDefinedValue( 1167 sema: *Sema, 1168 block: *Block, 1169 src: LazySrcLoc, 1170 air_ref: Air.Inst.Ref, 1171) CompileError!?Value { 1172 if (try sema.resolveMaybeUndefVal(block, src, air_ref)) |val| { 1173 if (val.isUndef()) { 1174 return sema.failWithUseOfUndef(block, src); 1175 } 1176 return val; 1177 } 1178 return null; 1179} 1180 1181/// Value Tag `variable` causes this function to return `null`. 1182/// Value Tag `undef` causes this function to return the Value. 1183/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. 1184fn resolveMaybeUndefVal( 1185 sema: *Sema, 1186 block: *Block, 1187 src: LazySrcLoc, 1188 inst: Air.Inst.Ref, 1189) CompileError!?Value { 1190 const val = (try sema.resolveMaybeUndefValAllowVariables(block, src, inst)) orelse return null; 1191 switch (val.tag()) { 1192 .variable => return null, 1193 .generic_poison => return error.GenericPoison, 1194 else => return val, 1195 } 1196} 1197 1198/// Returns all Value tags including `variable` and `undef`. 1199fn resolveMaybeUndefValAllowVariables( 1200 sema: *Sema, 1201 block: *Block, 1202 src: LazySrcLoc, 1203 inst: Air.Inst.Ref, 1204) CompileError!?Value { 1205 // First section of indexes correspond to a set number of constant values. 
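    // (Illustration, not exhaustive: a Ref whose integer value is below
    // `typed_value_map.len` names a well-known constant such as `.void_value`;
    // a Ref at or above that threshold encodes `typed_value_map.len + n`, where
    // `n` is an AIR instruction index. `resolveInst` above relies on the same
    // scheme on the ZIR side.)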
1206 var i: usize = @enumToInt(inst); 1207 if (i < Air.Inst.Ref.typed_value_map.len) { 1208 return Air.Inst.Ref.typed_value_map[i].val; 1209 } 1210 i -= Air.Inst.Ref.typed_value_map.len; 1211 1212 if (try sema.typeHasOnePossibleValue(block, src, sema.typeOf(inst))) |opv| { 1213 return opv; 1214 } 1215 const air_tags = sema.air_instructions.items(.tag); 1216 switch (air_tags[i]) { 1217 .constant => { 1218 const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; 1219 return sema.air_values.items[ty_pl.payload]; 1220 }, 1221 .const_ty => { 1222 return try sema.air_instructions.items(.data)[i].ty.toValue(sema.arena); 1223 }, 1224 else => return null, 1225 } 1226} 1227 1228fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError { 1229 return sema.fail(block, src, "unable to resolve comptime value", .{}); 1230} 1231 1232fn failWithUseOfUndef(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError { 1233 return sema.fail(block, src, "use of undefined value here causes undefined behavior", .{}); 1234} 1235 1236fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError { 1237 return sema.fail(block, src, "division by zero here causes undefined behavior", .{}); 1238} 1239 1240fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError { 1241 return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty }); 1242} 1243 1244fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, optional_ty: Type) CompileError { 1245 return sema.fail(block, src, "expected optional type, found {}", .{optional_ty}); 1246} 1247 1248fn failWithErrorSetCodeMissing( 1249 sema: *Sema, 1250 block: *Block, 1251 src: LazySrcLoc, 1252 dest_err_set_ty: Type, 1253 src_err_set_ty: Type, 1254) CompileError { 1255 return sema.fail(block, src, "expected type '{}', found type '{}'", .{ 1256 dest_err_set_ty, src_err_set_ty, 1257 }); 1258} 1259 1260/// We don't return a pointer to the new error note because the pointer 1261/// becomes invalid when you add another one. 
1262fn errNote( 1263 sema: *Sema, 1264 block: *Block, 1265 src: LazySrcLoc, 1266 parent: *Module.ErrorMsg, 1267 comptime format: []const u8, 1268 args: anytype, 1269) error{OutOfMemory}!void { 1270 return sema.mod.errNoteNonLazy(src.toSrcLoc(block.src_decl), parent, format, args); 1271} 1272 1273fn errMsg( 1274 sema: *Sema, 1275 block: *Block, 1276 src: LazySrcLoc, 1277 comptime format: []const u8, 1278 args: anytype, 1279) error{OutOfMemory}!*Module.ErrorMsg { 1280 return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(block.src_decl), format, args); 1281} 1282 1283pub fn fail( 1284 sema: *Sema, 1285 block: *Block, 1286 src: LazySrcLoc, 1287 comptime format: []const u8, 1288 args: anytype, 1289) CompileError { 1290 const err_msg = try sema.errMsg(block, src, format, args); 1291 return sema.failWithOwnedErrorMsg(err_msg); 1292} 1293 1294fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { 1295 @setCold(true); 1296 1297 if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) { 1298 std.debug.print("compile error during Sema: {s}, src: {s}:{}\n", .{ 1299 err_msg.msg, 1300 err_msg.src_loc.file_scope.sub_file_path, 1301 err_msg.src_loc.lazy, 1302 }); 1303 crash_report.compilerPanic("unexpected compile error occurred", null); 1304 } 1305 1306 const mod = sema.mod; 1307 1308 { 1309 errdefer err_msg.destroy(mod.gpa); 1310 if (err_msg.src_loc.lazy == .unneeded) { 1311 return error.NeededSourceLocation; 1312 } 1313 try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); 1314 try mod.failed_files.ensureUnusedCapacity(mod.gpa, 1); 1315 } 1316 if (sema.owner_func) |func| { 1317 func.state = .sema_failure; 1318 } else { 1319 sema.owner_decl.analysis = .sema_failure; 1320 sema.owner_decl.generation = mod.generation; 1321 } 1322 mod.failed_decls.putAssumeCapacityNoClobber(sema.owner_decl, err_msg); 1323 return error.AnalysisFail; 1324} 1325 1326/// Appropriate to call when the coercion has already been done by result 1327/// location semantics. Asserts the value fits in the provided `Int` type. 1328/// Only supports `Int` types 64 bits or less. 1329/// TODO don't ever call this since we're migrating towards ResultLoc.coerced_ty. 1330fn resolveAlreadyCoercedInt( 1331 sema: *Sema, 1332 block: *Block, 1333 src: LazySrcLoc, 1334 zir_ref: Zir.Inst.Ref, 1335 comptime Int: type, 1336) !Int { 1337 comptime assert(@typeInfo(Int).Int.bits <= 64); 1338 const air_inst = sema.resolveInst(zir_ref); 1339 const val = try sema.resolveConstValue(block, src, air_inst); 1340 switch (@typeInfo(Int).Int.signedness) { 1341 .signed => return @intCast(Int, val.toSignedInt()), 1342 .unsigned => return @intCast(Int, val.toUnsignedInt()), 1343 } 1344} 1345 1346fn resolveAlign( 1347 sema: *Sema, 1348 block: *Block, 1349 src: LazySrcLoc, 1350 zir_ref: Zir.Inst.Ref, 1351) !u16 { 1352 const alignment_big = try sema.resolveInt(block, src, zir_ref, Type.initTag(.u16)); 1353 const alignment = @intCast(u16, alignment_big); // We coerce to u16 in the prev line. 
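    // For example, `align(0)` and `align(3)` are rejected by the checks below,
    // while `align(1)`, `align(2)`, `align(4)`, and so on are accepted; the
    // requirements are exactly "nonzero" and "power of two".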
1354 if (alignment == 0) return sema.fail(block, src, "alignment must be >= 1", .{}); 1355 if (!std.math.isPowerOfTwo(alignment)) { 1356 return sema.fail(block, src, "alignment value {d} is not a power of two", .{ 1357 alignment, 1358 }); 1359 } 1360 return alignment; 1361} 1362 1363fn resolveInt( 1364 sema: *Sema, 1365 block: *Block, 1366 src: LazySrcLoc, 1367 zir_ref: Zir.Inst.Ref, 1368 dest_ty: Type, 1369) !u64 { 1370 const air_inst = sema.resolveInst(zir_ref); 1371 const coerced = try sema.coerce(block, dest_ty, air_inst, src); 1372 const val = try sema.resolveConstValue(block, src, coerced); 1373 1374 return val.toUnsignedInt(); 1375} 1376 1377// Returns a compile error if the value has tag `variable`. See `resolveInstValue` for 1378// a function that does not. 1379pub fn resolveInstConst( 1380 sema: *Sema, 1381 block: *Block, 1382 src: LazySrcLoc, 1383 zir_ref: Zir.Inst.Ref, 1384) CompileError!TypedValue { 1385 const air_ref = sema.resolveInst(zir_ref); 1386 const val = try sema.resolveConstValue(block, src, air_ref); 1387 return TypedValue{ 1388 .ty = sema.typeOf(air_ref), 1389 .val = val, 1390 }; 1391} 1392 1393// Value Tag may be `undef` or `variable`. 1394// See `resolveInstConst` for an alternative. 1395pub fn resolveInstValue( 1396 sema: *Sema, 1397 block: *Block, 1398 src: LazySrcLoc, 1399 zir_ref: Zir.Inst.Ref, 1400) CompileError!TypedValue { 1401 const air_ref = sema.resolveInst(zir_ref); 1402 const val = try sema.resolveValue(block, src, air_ref); 1403 return TypedValue{ 1404 .ty = sema.typeOf(air_ref), 1405 .val = val, 1406 }; 1407} 1408 1409fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 1410 const tracy = trace(@src()); 1411 defer tracy.end(); 1412 1413 const src: LazySrcLoc = sema.src; 1414 const bin_inst = sema.code.instructions.items(.data)[inst].bin; 1415 const pointee_ty = try sema.resolveType(block, src, bin_inst.lhs); 1416 const ptr = sema.resolveInst(bin_inst.rhs); 1417 1418 const addr_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local); 1419 1420 if (Air.refToIndex(ptr)) |ptr_inst| { 1421 if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) { 1422 const air_datas = sema.air_instructions.items(.data); 1423 const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; 1424 switch (ptr_val.tag()) { 1425 .inferred_alloc => { 1426 const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data; 1427 // Add the stored instruction to the set we will use to resolve peer types 1428 // for the inferred allocation. 1429 // This instruction will not make it to codegen; it is only to participate 1430 // in the `stored_inst_list` of the `inferred_alloc`. 1431 var trash_block = block.makeSubBlock(); 1432 defer trash_block.instructions.deinit(sema.gpa); 1433 const operand = try trash_block.addBitCast(pointee_ty, .void_value); 1434 1435 try inferred_alloc.stored_inst_list.append(sema.arena, operand); 1436 1437 try sema.requireRuntimeBlock(block, src); 1438 const ptr_ty = try Type.ptr(sema.arena, .{ 1439 .pointee_type = pointee_ty, 1440 .@"align" = inferred_alloc.alignment, 1441 .@"addrspace" = addr_space, 1442 }); 1443 const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); 1444 return bitcasted_ptr; 1445 }, 1446 .inferred_alloc_comptime => { 1447 const iac = ptr_val.castTag(.inferred_alloc_comptime).?; 1448 // There will be only one coerce_result_ptr because we are running at comptime. 1449 // The alloc will turn into a Decl. 
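                    // This uses the `WipAnonDecl` pattern defined above:
                    // `startAnonDecl` provides an arena, `finish` creates the Decl
                    // and takes over the arena, and the deferred `deinit` frees the
                    // arena only if `finish` was never reached.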
1450 var anon_decl = try block.startAnonDecl(); 1451 defer anon_decl.deinit(); 1452 iac.data.decl = try anon_decl.finish( 1453 try pointee_ty.copy(anon_decl.arena()), 1454 Value.undef, 1455 ); 1456 const ptr_ty = try Type.ptr(sema.arena, .{ 1457 .pointee_type = pointee_ty, 1458 .@"align" = iac.data.alignment, 1459 .@"addrspace" = addr_space, 1460 }); 1461 return sema.addConstant( 1462 ptr_ty, 1463 try Value.Tag.decl_ref_mut.create(sema.arena, .{ 1464 .decl = iac.data.decl, 1465 .runtime_index = block.runtime_index, 1466 }), 1467 ); 1468 }, 1469 .decl_ref_mut => { 1470 const ptr_ty = try Type.ptr(sema.arena, .{ 1471 .pointee_type = pointee_ty, 1472 .@"addrspace" = addr_space, 1473 }); 1474 return sema.addConstant(ptr_ty, ptr_val); 1475 }, 1476 else => {}, 1477 } 1478 } 1479 } 1480 1481 try sema.requireRuntimeBlock(block, src); 1482 1483 // Make a dummy store through the pointer to test the coercion. 1484 // We will then use the generated instructions to decide what 1485 // kind of transformations to make on the result pointer. 1486 var trash_block = block.makeSubBlock(); 1487 defer trash_block.instructions.deinit(sema.gpa); 1488 1489 const dummy_operand = try trash_block.addBitCast(pointee_ty, .void_value); 1490 try sema.storePtr(&trash_block, src, ptr, dummy_operand); 1491 1492 { 1493 const air_tags = sema.air_instructions.items(.tag); 1494 1495 //std.debug.print("dummy storePtr instructions:\n", .{}); 1496 //for (trash_block.instructions.items) |item| { 1497 // std.debug.print(" {s}\n", .{@tagName(air_tags[item])}); 1498 //} 1499 1500 // The last one is always `store`. 1501 const trash_inst = trash_block.instructions.pop(); 1502 assert(air_tags[trash_inst] == .store); 1503 assert(trash_inst == sema.air_instructions.len - 1); 1504 sema.air_instructions.len -= 1; 1505 } 1506 1507 const ptr_ty = try Type.ptr(sema.arena, .{ 1508 .pointee_type = pointee_ty, 1509 .@"addrspace" = addr_space, 1510 }); 1511 1512 var new_ptr = ptr; 1513 1514 while (true) { 1515 const air_tags = sema.air_instructions.items(.tag); 1516 const air_datas = sema.air_instructions.items(.data); 1517 const trash_inst = trash_block.instructions.pop(); 1518 switch (air_tags[trash_inst]) { 1519 .bitcast => { 1520 if (Air.indexToRef(trash_inst) == dummy_operand) { 1521 return block.addBitCast(ptr_ty, new_ptr); 1522 } 1523 const ty_op = air_datas[trash_inst].ty_op; 1524 const operand_ty = sema.getTmpAir().typeOf(ty_op.operand); 1525 const ptr_operand_ty = try Type.ptr(sema.arena, .{ 1526 .pointee_type = operand_ty, 1527 .@"addrspace" = addr_space, 1528 }); 1529 new_ptr = try block.addBitCast(ptr_operand_ty, new_ptr); 1530 }, 1531 .wrap_optional => { 1532 const ty_op = air_datas[trash_inst].ty_op; 1533 const payload_ty = sema.getTmpAir().typeOf(ty_op.operand); 1534 const ptr_payload_ty = try Type.ptr(sema.arena, .{ 1535 .pointee_type = payload_ty, 1536 .@"addrspace" = addr_space, 1537 }); 1538 new_ptr = try block.addTyOp(.optional_payload_ptr_set, ptr_payload_ty, new_ptr); 1539 }, 1540 .wrap_errunion_err => { 1541 return sema.fail(block, src, "TODO coerce_result_ptr wrap_errunion_err", .{}); 1542 }, 1543 .wrap_errunion_payload => { 1544 return sema.fail(block, src, "TODO coerce_result_ptr wrap_errunion_payload", .{}); 1545 }, 1546 else => { 1547 if (std.debug.runtime_safety) { 1548 std.debug.panic("unexpected AIR tag for coerce_result_ptr: {s}", .{ 1549 air_tags[trash_inst], 1550 }); 1551 } else { 1552 unreachable; 1553 } 1554 }, 1555 } 1556 } else unreachable; // TODO should not need else unreachable 1557} 1558 1559pub fn 
analyzeStructDecl( 1560 sema: *Sema, 1561 new_decl: *Decl, 1562 inst: Zir.Inst.Index, 1563 struct_obj: *Module.Struct, 1564) SemaError!void { 1565 const extended = sema.code.instructions.items(.data)[inst].extended; 1566 assert(extended.opcode == .struct_decl); 1567 const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); 1568 1569 struct_obj.known_has_bits = small.known_has_bits; 1570 1571 var extra_index: usize = extended.operand; 1572 extra_index += @boolToInt(small.has_src_node); 1573 extra_index += @boolToInt(small.has_body_len); 1574 extra_index += @boolToInt(small.has_fields_len); 1575 const decls_len = if (small.has_decls_len) blk: { 1576 const decls_len = sema.code.extra[extra_index]; 1577 extra_index += 1; 1578 break :blk decls_len; 1579 } else 0; 1580 1581 _ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl); 1582} 1583 1584fn zirStructDecl( 1585 sema: *Sema, 1586 block: *Block, 1587 extended: Zir.Inst.Extended.InstData, 1588 inst: Zir.Inst.Index, 1589) CompileError!Air.Inst.Ref { 1590 const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); 1591 const src: LazySrcLoc = if (small.has_src_node) blk: { 1592 const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); 1593 break :blk .{ .node_offset = node_offset }; 1594 } else sema.src; 1595 1596 var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); 1597 errdefer new_decl_arena.deinit(); 1598 const new_decl_arena_allocator = new_decl_arena.allocator(); 1599 1600 const struct_obj = try new_decl_arena_allocator.create(Module.Struct); 1601 const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); 1602 const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); 1603 const type_name = try sema.createTypeName(block, small.name_strategy); 1604 const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ 1605 .ty = Type.type, 1606 .val = struct_val, 1607 }, type_name); 1608 new_decl.owns_tv = true; 1609 errdefer sema.mod.abortAnonDecl(new_decl); 1610 struct_obj.* = .{ 1611 .owner_decl = new_decl, 1612 .fields = .{}, 1613 .node_offset = src.node_offset, 1614 .zir_index = inst, 1615 .layout = small.layout, 1616 .status = .none, 1617 .known_has_bits = undefined, 1618 .namespace = .{ 1619 .parent = block.namespace, 1620 .ty = struct_ty, 1621 .file_scope = block.getFileScope(), 1622 }, 1623 }; 1624 std.log.scoped(.module).debug("create struct {*} owned by {*} ({s})", .{ 1625 &struct_obj.namespace, new_decl, new_decl.name, 1626 }); 1627 try sema.analyzeStructDecl(new_decl, inst, struct_obj); 1628 try new_decl.finalizeNewArena(&new_decl_arena); 1629 return sema.analyzeDeclVal(block, src, new_decl); 1630} 1631 1632fn createTypeName(sema: *Sema, block: *Block, name_strategy: Zir.Inst.NameStrategy) ![:0]u8 { 1633 switch (name_strategy) { 1634 .anon => { 1635 // It would be neat to have "struct:line:column" but this name has 1636 // to survive incremental updates, where it may have been shifted down 1637 // or up to a different line, but unchanged, and thus not unnecessarily 1638 // semantically analyzed. 
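            // Instead the name is "<parent decl name>__anon_<N>", where N comes from the
            // module's anonymous-name counter; e.g. "Foo__anon_42" (illustrative only, the
            // exact number depends on analysis order).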
1639 const name_index = sema.mod.getNextAnonNameIndex(); 1640 return std.fmt.allocPrintZ(sema.gpa, "{s}__anon_{d}", .{ 1641 block.src_decl.name, name_index, 1642 }); 1643 }, 1644 .parent => return sema.gpa.dupeZ(u8, mem.sliceTo(block.src_decl.name, 0)), 1645 .func => { 1646 const name_index = sema.mod.getNextAnonNameIndex(); 1647 const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__anon_{d}", .{ 1648 block.src_decl.name, name_index, 1649 }); 1650 log.warn("TODO: handle NameStrategy.func correctly instead of using anon name '{s}'", .{ 1651 name, 1652 }); 1653 return name; 1654 }, 1655 } 1656} 1657 1658fn zirEnumDecl( 1659 sema: *Sema, 1660 block: *Block, 1661 extended: Zir.Inst.Extended.InstData, 1662) CompileError!Air.Inst.Ref { 1663 const tracy = trace(@src()); 1664 defer tracy.end(); 1665 1666 const mod = sema.mod; 1667 const gpa = sema.gpa; 1668 const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small); 1669 var extra_index: usize = extended.operand; 1670 1671 const src: LazySrcLoc = if (small.has_src_node) blk: { 1672 const node_offset = @bitCast(i32, sema.code.extra[extra_index]); 1673 extra_index += 1; 1674 break :blk .{ .node_offset = node_offset }; 1675 } else sema.src; 1676 1677 const tag_type_ref = if (small.has_tag_type) blk: { 1678 const tag_type_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 1679 extra_index += 1; 1680 break :blk tag_type_ref; 1681 } else .none; 1682 1683 const body_len = if (small.has_body_len) blk: { 1684 const body_len = sema.code.extra[extra_index]; 1685 extra_index += 1; 1686 break :blk body_len; 1687 } else 0; 1688 1689 const fields_len = if (small.has_fields_len) blk: { 1690 const fields_len = sema.code.extra[extra_index]; 1691 extra_index += 1; 1692 break :blk fields_len; 1693 } else 0; 1694 1695 const decls_len = if (small.has_decls_len) blk: { 1696 const decls_len = sema.code.extra[extra_index]; 1697 extra_index += 1; 1698 break :blk decls_len; 1699 } else 0; 1700 1701 var new_decl_arena = std.heap.ArenaAllocator.init(gpa); 1702 errdefer new_decl_arena.deinit(); 1703 const new_decl_arena_allocator = new_decl_arena.allocator(); 1704 1705 const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull); 1706 const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull); 1707 enum_ty_payload.* = .{ 1708 .base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full }, 1709 .data = enum_obj, 1710 }; 1711 const enum_ty = Type.initPayload(&enum_ty_payload.base); 1712 const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); 1713 const type_name = try sema.createTypeName(block, small.name_strategy); 1714 const new_decl = try mod.createAnonymousDeclNamed(block, .{ 1715 .ty = Type.type, 1716 .val = enum_val, 1717 }, type_name); 1718 new_decl.owns_tv = true; 1719 errdefer mod.abortAnonDecl(new_decl); 1720 1721 enum_obj.* = .{ 1722 .owner_decl = new_decl, 1723 .tag_ty = Type.initTag(.@"null"), 1724 .fields = .{}, 1725 .values = .{}, 1726 .node_offset = src.node_offset, 1727 .namespace = .{ 1728 .parent = block.namespace, 1729 .ty = enum_ty, 1730 .file_scope = block.getFileScope(), 1731 }, 1732 }; 1733 std.log.scoped(.module).debug("create enum {*} owned by {*} ({s})", .{ 1734 &enum_obj.namespace, new_decl, new_decl.name, 1735 }); 1736 1737 extra_index = try mod.scanNamespace(&enum_obj.namespace, extra_index, decls_len, new_decl); 1738 1739 const body = sema.code.extra[extra_index..][0..body_len]; 1740 if (fields_len == 0) { 1741 assert(body.len == 0); 1742 try 
new_decl.finalizeNewArena(&new_decl_arena); 1743 return sema.analyzeDeclVal(block, src, new_decl); 1744 } 1745 extra_index += body.len; 1746 1747 const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable; 1748 const body_end = extra_index; 1749 extra_index += bit_bags_count; 1750 1751 { 1752 // We create a block for the field type instructions because they 1753 // may need to reference Decls from inside the enum namespace. 1754 // Within the field type, default value, and alignment expressions, the "owner decl" 1755 // should be the enum itself. 1756 1757 const prev_owner_decl = sema.owner_decl; 1758 sema.owner_decl = new_decl; 1759 defer sema.owner_decl = prev_owner_decl; 1760 1761 const prev_owner_func = sema.owner_func; 1762 sema.owner_func = null; 1763 defer sema.owner_func = prev_owner_func; 1764 1765 const prev_func = sema.func; 1766 sema.func = null; 1767 defer sema.func = prev_func; 1768 1769 var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); 1770 defer wip_captures.deinit(); 1771 1772 var enum_block: Block = .{ 1773 .parent = null, 1774 .sema = sema, 1775 .src_decl = new_decl, 1776 .namespace = &enum_obj.namespace, 1777 .wip_capture_scope = wip_captures.scope, 1778 .instructions = .{}, 1779 .inlining = null, 1780 .is_comptime = true, 1781 }; 1782 defer assert(enum_block.instructions.items.len == 0); // should all be comptime instructions 1783 1784 if (body.len != 0) { 1785 _ = try sema.analyzeBody(&enum_block, body); 1786 } 1787 1788 try wip_captures.finalize(); 1789 1790 const tag_ty = blk: { 1791 if (tag_type_ref != .none) { 1792 // TODO better source location 1793 break :blk try sema.resolveType(block, src, tag_type_ref); 1794 } 1795 const bits = std.math.log2_int_ceil(usize, fields_len); 1796 break :blk try Type.Tag.int_unsigned.create(new_decl_arena_allocator, bits); 1797 }; 1798 enum_obj.tag_ty = tag_ty; 1799 } 1800 1801 try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); 1802 const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| { 1803 if (bag != 0) break true; 1804 } else false; 1805 if (any_values) { 1806 try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ 1807 .ty = enum_obj.tag_ty, 1808 }); 1809 } 1810 1811 var bit_bag_index: usize = body_end; 1812 var cur_bit_bag: u32 = undefined; 1813 var field_i: u32 = 0; 1814 while (field_i < fields_len) : (field_i += 1) { 1815 if (field_i % 32 == 0) { 1816 cur_bit_bag = sema.code.extra[bit_bag_index]; 1817 bit_bag_index += 1; 1818 } 1819 const has_tag_value = @truncate(u1, cur_bit_bag) != 0; 1820 cur_bit_bag >>= 1; 1821 1822 const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[extra_index]); 1823 extra_index += 1; 1824 1825 // This string needs to outlive the ZIR code. 
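        // Duplicating it into the new Decl's arena below gives it the same lifetime as
        // the enum type itself.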
1826 const field_name = try new_decl_arena_allocator.dupe(u8, field_name_zir); 1827 1828 const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name); 1829 if (gop.found_existing) { 1830 const tree = try sema.getAstTree(block); 1831 const field_src = enumFieldSrcLoc(block.src_decl, tree.*, src.node_offset, field_i); 1832 const other_tag_src = enumFieldSrcLoc(block.src_decl, tree.*, src.node_offset, gop.index); 1833 const msg = msg: { 1834 const msg = try sema.errMsg(block, field_src, "duplicate enum tag", .{}); 1835 errdefer msg.destroy(gpa); 1836 try sema.errNote(block, other_tag_src, msg, "other tag here", .{}); 1837 break :msg msg; 1838 }; 1839 return sema.failWithOwnedErrorMsg(msg); 1840 } 1841 1842 if (has_tag_value) { 1843 const tag_val_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 1844 extra_index += 1; 1845 // TODO: if we need to report an error here, use a source location 1846 // that points to this default value expression rather than the struct. 1847 // But only resolve the source location if we need to emit a compile error. 1848 const tag_val = (try sema.resolveInstConst(block, src, tag_val_ref)).val; 1849 const copied_tag_val = try tag_val.copy(new_decl_arena_allocator); 1850 enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{ 1851 .ty = enum_obj.tag_ty, 1852 }); 1853 } else if (any_values) { 1854 const tag_val = try Value.Tag.int_u64.create(new_decl_arena_allocator, field_i); 1855 enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = enum_obj.tag_ty }); 1856 } 1857 } 1858 1859 try new_decl.finalizeNewArena(&new_decl_arena); 1860 return sema.analyzeDeclVal(block, src, new_decl); 1861} 1862 1863fn zirUnionDecl( 1864 sema: *Sema, 1865 block: *Block, 1866 extended: Zir.Inst.Extended.InstData, 1867 inst: Zir.Inst.Index, 1868) CompileError!Air.Inst.Ref { 1869 const tracy = trace(@src()); 1870 defer tracy.end(); 1871 1872 const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); 1873 var extra_index: usize = extended.operand; 1874 1875 const src: LazySrcLoc = if (small.has_src_node) blk: { 1876 const node_offset = @bitCast(i32, sema.code.extra[extra_index]); 1877 extra_index += 1; 1878 break :blk .{ .node_offset = node_offset }; 1879 } else sema.src; 1880 1881 extra_index += @boolToInt(small.has_tag_type); 1882 extra_index += @boolToInt(small.has_body_len); 1883 extra_index += @boolToInt(small.has_fields_len); 1884 1885 const decls_len = if (small.has_decls_len) blk: { 1886 const decls_len = sema.code.extra[extra_index]; 1887 extra_index += 1; 1888 break :blk decls_len; 1889 } else 0; 1890 1891 var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); 1892 errdefer new_decl_arena.deinit(); 1893 const new_decl_arena_allocator = new_decl_arena.allocator(); 1894 1895 const union_obj = try new_decl_arena_allocator.create(Module.Union); 1896 const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union"; 1897 const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union); 1898 union_payload.* = .{ 1899 .base = .{ .tag = type_tag }, 1900 .data = union_obj, 1901 }; 1902 const union_ty = Type.initPayload(&union_payload.base); 1903 const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty); 1904 const type_name = try sema.createTypeName(block, small.name_strategy); 1905 const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ 1906 .ty = Type.type, 1907 .val = union_val, 1908 }, type_name); 1909 new_decl.owns_tv = true; 1910 errdefer 
sema.mod.abortAnonDecl(new_decl); 1911 union_obj.* = .{ 1912 .owner_decl = new_decl, 1913 .tag_ty = Type.initTag(.@"null"), 1914 .fields = .{}, 1915 .node_offset = src.node_offset, 1916 .zir_index = inst, 1917 .layout = small.layout, 1918 .status = .none, 1919 .namespace = .{ 1920 .parent = block.namespace, 1921 .ty = union_ty, 1922 .file_scope = block.getFileScope(), 1923 }, 1924 }; 1925 std.log.scoped(.module).debug("create union {*} owned by {*} ({s})", .{ 1926 &union_obj.namespace, new_decl, new_decl.name, 1927 }); 1928 1929 _ = try sema.mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl); 1930 1931 try new_decl.finalizeNewArena(&new_decl_arena); 1932 return sema.analyzeDeclVal(block, src, new_decl); 1933} 1934 1935fn zirOpaqueDecl( 1936 sema: *Sema, 1937 block: *Block, 1938 extended: Zir.Inst.Extended.InstData, 1939) CompileError!Air.Inst.Ref { 1940 const tracy = trace(@src()); 1941 defer tracy.end(); 1942 1943 const mod = sema.mod; 1944 const gpa = sema.gpa; 1945 const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); 1946 var extra_index: usize = extended.operand; 1947 1948 const src: LazySrcLoc = if (small.has_src_node) blk: { 1949 const node_offset = @bitCast(i32, sema.code.extra[extra_index]); 1950 extra_index += 1; 1951 break :blk .{ .node_offset = node_offset }; 1952 } else sema.src; 1953 1954 const decls_len = if (small.has_decls_len) blk: { 1955 const decls_len = sema.code.extra[extra_index]; 1956 extra_index += 1; 1957 break :blk decls_len; 1958 } else 0; 1959 1960 var new_decl_arena = std.heap.ArenaAllocator.init(gpa); 1961 errdefer new_decl_arena.deinit(); 1962 const new_decl_arena_allocator = new_decl_arena.allocator(); 1963 1964 const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque); 1965 const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque); 1966 opaque_ty_payload.* = .{ 1967 .base = .{ .tag = .@"opaque" }, 1968 .data = opaque_obj, 1969 }; 1970 const opaque_ty = Type.initPayload(&opaque_ty_payload.base); 1971 const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty); 1972 const type_name = try sema.createTypeName(block, small.name_strategy); 1973 const new_decl = try mod.createAnonymousDeclNamed(block, .{ 1974 .ty = Type.type, 1975 .val = opaque_val, 1976 }, type_name); 1977 new_decl.owns_tv = true; 1978 errdefer mod.abortAnonDecl(new_decl); 1979 1980 opaque_obj.* = .{ 1981 .owner_decl = new_decl, 1982 .node_offset = src.node_offset, 1983 .namespace = .{ 1984 .parent = block.namespace, 1985 .ty = opaque_ty, 1986 .file_scope = block.getFileScope(), 1987 }, 1988 }; 1989 std.log.scoped(.module).debug("create opaque {*} owned by {*} ({s})", .{ 1990 &opaque_obj.namespace, new_decl, new_decl.name, 1991 }); 1992 1993 extra_index = try mod.scanNamespace(&opaque_obj.namespace, extra_index, decls_len, new_decl); 1994 1995 try new_decl.finalizeNewArena(&new_decl_arena); 1996 return sema.analyzeDeclVal(block, src, new_decl); 1997} 1998 1999fn zirErrorSetDecl( 2000 sema: *Sema, 2001 block: *Block, 2002 inst: Zir.Inst.Index, 2003 name_strategy: Zir.Inst.NameStrategy, 2004) CompileError!Air.Inst.Ref { 2005 const tracy = trace(@src()); 2006 defer tracy.end(); 2007 2008 const gpa = sema.gpa; 2009 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 2010 const src = inst_data.src(); 2011 const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index); 2012 const fields = sema.code.extra[extra.end..][0..extra.data.fields_len]; 2013 2014 var new_decl_arena = 
std.heap.ArenaAllocator.init(gpa); 2015 errdefer new_decl_arena.deinit(); 2016 const new_decl_arena_allocator = new_decl_arena.allocator(); 2017 2018 const error_set = try new_decl_arena_allocator.create(Module.ErrorSet); 2019 const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set); 2020 const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty); 2021 const type_name = try sema.createTypeName(block, name_strategy); 2022 const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ 2023 .ty = Type.type, 2024 .val = error_set_val, 2025 }, type_name); 2026 new_decl.owns_tv = true; 2027 errdefer sema.mod.abortAnonDecl(new_decl); 2028 const names = try new_decl_arena_allocator.alloc([]const u8, fields.len); 2029 for (fields) |str_index, i| { 2030 names[i] = try new_decl_arena_allocator.dupe(u8, sema.code.nullTerminatedString(str_index)); 2031 } 2032 error_set.* = .{ 2033 .owner_decl = new_decl, 2034 .node_offset = inst_data.src_node, 2035 .names_ptr = names.ptr, 2036 .names_len = @intCast(u32, names.len), 2037 }; 2038 try new_decl.finalizeNewArena(&new_decl_arena); 2039 return sema.analyzeDeclVal(block, src, new_decl); 2040} 2041 2042fn zirRetPtr( 2043 sema: *Sema, 2044 block: *Block, 2045 extended: Zir.Inst.Extended.InstData, 2046) CompileError!Air.Inst.Ref { 2047 const tracy = trace(@src()); 2048 defer tracy.end(); 2049 2050 const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; 2051 try sema.requireFunctionBlock(block, src); 2052 2053 if (block.is_comptime) { 2054 return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, 0); 2055 } 2056 2057 const ptr_type = try Type.ptr(sema.arena, .{ 2058 .pointee_type = sema.fn_ret_ty, 2059 .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), 2060 }); 2061 2062 if (block.inlining != null) { 2063 // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr. 2064 // TODO when functions gain result location support, the inlining struct in 2065 // Block should contain the return pointer, and we would pass that through here. 
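        // Descriptive note: during an inline call the return value simply lives in a
        // plain `alloc` in the enclosing (caller) body, so no `ret_ptr` instruction is
        // emitted here.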
2066 return block.addTy(.alloc, ptr_type); 2067 } 2068 2069 return block.addTy(.ret_ptr, ptr_type); 2070} 2071 2072fn zirRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2073 const tracy = trace(@src()); 2074 defer tracy.end(); 2075 2076 const inst_data = sema.code.instructions.items(.data)[inst].un_tok; 2077 const operand = sema.resolveInst(inst_data.operand); 2078 return sema.analyzeRef(block, inst_data.src(), operand); 2079} 2080 2081fn zirRetType( 2082 sema: *Sema, 2083 block: *Block, 2084 extended: Zir.Inst.Extended.InstData, 2085) CompileError!Air.Inst.Ref { 2086 const tracy = trace(@src()); 2087 defer tracy.end(); 2088 2089 const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; 2090 try sema.requireFunctionBlock(block, src); 2091 return sema.addType(sema.fn_ret_ty); 2092} 2093 2094fn zirEnsureResultUsed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2095 const tracy = trace(@src()); 2096 defer tracy.end(); 2097 2098 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2099 const operand = sema.resolveInst(inst_data.operand); 2100 const src = inst_data.src(); 2101 2102 return sema.ensureResultUsed(block, operand, src); 2103} 2104 2105fn ensureResultUsed( 2106 sema: *Sema, 2107 block: *Block, 2108 operand: Air.Inst.Ref, 2109 src: LazySrcLoc, 2110) CompileError!void { 2111 const operand_ty = sema.typeOf(operand); 2112 switch (operand_ty.zigTypeTag()) { 2113 .Void, .NoReturn => return, 2114 else => return sema.fail(block, src, "expression value is ignored", .{}), 2115 } 2116} 2117 2118fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2119 const tracy = trace(@src()); 2120 defer tracy.end(); 2121 2122 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2123 const operand = sema.resolveInst(inst_data.operand); 2124 const src = inst_data.src(); 2125 const operand_ty = sema.typeOf(operand); 2126 switch (operand_ty.zigTypeTag()) { 2127 .ErrorSet, .ErrorUnion => return sema.fail(block, src, "error is discarded", .{}), 2128 else => return, 2129 } 2130} 2131 2132fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2133 const tracy = trace(@src()); 2134 defer tracy.end(); 2135 2136 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2137 const src = inst_data.src(); 2138 const object = sema.resolveInst(inst_data.operand); 2139 const object_ty = sema.typeOf(object); 2140 2141 const is_pointer_to = object_ty.isSinglePointer(); 2142 2143 const array_ty = if (is_pointer_to) 2144 object_ty.childType() 2145 else 2146 object_ty; 2147 2148 if (!array_ty.isIndexable()) { 2149 const msg = msg: { 2150 const msg = try sema.errMsg( 2151 block, 2152 src, 2153 "type '{}' does not support indexing", 2154 .{array_ty}, 2155 ); 2156 errdefer msg.destroy(sema.gpa); 2157 try sema.errNote( 2158 block, 2159 src, 2160 msg, 2161 "for loop operand must be an array, slice, tuple, or vector", 2162 .{}, 2163 ); 2164 break :msg msg; 2165 }; 2166 return sema.failWithOwnedErrorMsg(msg); 2167 } 2168 2169 return sema.fieldVal(block, src, object, "len", src); 2170} 2171 2172fn zirAllocExtended( 2173 sema: *Sema, 2174 block: *Block, 2175 extended: Zir.Inst.Extended.InstData, 2176) CompileError!Air.Inst.Ref { 2177 const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); 2178 const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; 2179 const ty_src = src; // TODO better source location 
2180 const align_src = src; // TODO better source location 2181 const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small); 2182 2183 var extra_index: usize = extra.end; 2184 2185 const var_ty: Type = if (small.has_type) blk: { 2186 const type_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 2187 extra_index += 1; 2188 break :blk try sema.resolveType(block, ty_src, type_ref); 2189 } else undefined; 2190 2191 const alignment: u16 = if (small.has_align) blk: { 2192 const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 2193 extra_index += 1; 2194 const alignment = try sema.resolveAlign(block, align_src, align_ref); 2195 break :blk alignment; 2196 } else 0; 2197 2198 const inferred_alloc_ty = if (small.is_const) 2199 Type.initTag(.inferred_alloc_const) 2200 else 2201 Type.initTag(.inferred_alloc_mut); 2202 2203 if (small.is_comptime) { 2204 if (small.has_type) { 2205 return sema.analyzeComptimeAlloc(block, var_ty, alignment); 2206 } else { 2207 return sema.addConstant( 2208 inferred_alloc_ty, 2209 try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ 2210 .decl = undefined, 2211 .alignment = alignment, 2212 }), 2213 ); 2214 } 2215 } 2216 2217 if (small.has_type) { 2218 if (!small.is_const) { 2219 try sema.validateVarType(block, ty_src, var_ty, false); 2220 } 2221 const ptr_type = try Type.ptr(sema.arena, .{ 2222 .pointee_type = var_ty, 2223 .@"align" = alignment, 2224 .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), 2225 }); 2226 try sema.requireRuntimeBlock(block, src); 2227 try sema.resolveTypeLayout(block, src, var_ty); 2228 return block.addTy(.alloc, ptr_type); 2229 } 2230 2231 // `Sema.addConstant` does not add the instruction to the block because it is 2232 // not needed in the case of constant values. However here, we plan to "downgrade" 2233 // to a normal instruction when we hit `resolve_inferred_alloc`. So we append 2234 // to the block even though it is currently a `.constant`. 
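    // Illustrative source pattern (assumption about AstGen lowering): something like
    // `var x align(16) = init;` with no explicit type reaches this path; the element
    // type is only decided once `resolve_inferred_alloc` has seen the stored values.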
2235 const result = try sema.addConstant( 2236 inferred_alloc_ty, 2237 try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = alignment }), 2238 ); 2239 try sema.requireFunctionBlock(block, src); 2240 try block.instructions.append(sema.gpa, Air.refToIndex(result).?); 2241 return result; 2242} 2243 2244fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2245 const tracy = trace(@src()); 2246 defer tracy.end(); 2247 2248 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2249 const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; 2250 const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); 2251 return sema.analyzeComptimeAlloc(block, var_ty, 0); 2252} 2253 2254fn zirAllocInferredComptime(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2255 const src_node = sema.code.instructions.items(.data)[inst].node; 2256 const src: LazySrcLoc = .{ .node_offset = src_node }; 2257 sema.src = src; 2258 return sema.addConstant( 2259 Type.initTag(.inferred_alloc_mut), 2260 try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined), 2261 ); 2262} 2263 2264fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2265 const tracy = trace(@src()); 2266 defer tracy.end(); 2267 2268 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2269 const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; 2270 const var_decl_src = inst_data.src(); 2271 const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); 2272 if (block.is_comptime) { 2273 return sema.analyzeComptimeAlloc(block, var_ty, 0); 2274 } 2275 const ptr_type = try Type.ptr(sema.arena, .{ 2276 .pointee_type = var_ty, 2277 .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), 2278 }); 2279 try sema.requireRuntimeBlock(block, var_decl_src); 2280 try sema.resolveTypeLayout(block, ty_src, var_ty); 2281 return block.addTy(.alloc, ptr_type); 2282} 2283 2284fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2285 const tracy = trace(@src()); 2286 defer tracy.end(); 2287 2288 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2289 const var_decl_src = inst_data.src(); 2290 const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; 2291 const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); 2292 if (block.is_comptime) { 2293 return sema.analyzeComptimeAlloc(block, var_ty, 0); 2294 } 2295 try sema.validateVarType(block, ty_src, var_ty, false); 2296 const ptr_type = try Type.ptr(sema.arena, .{ 2297 .pointee_type = var_ty, 2298 .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), 2299 }); 2300 try sema.requireRuntimeBlock(block, var_decl_src); 2301 try sema.resolveTypeLayout(block, ty_src, var_ty); 2302 return block.addTy(.alloc, ptr_type); 2303} 2304 2305fn zirAllocInferred( 2306 sema: *Sema, 2307 block: *Block, 2308 inst: Zir.Inst.Index, 2309 inferred_alloc_ty: Type, 2310) CompileError!Air.Inst.Ref { 2311 const tracy = trace(@src()); 2312 defer tracy.end(); 2313 2314 const src_node = sema.code.instructions.items(.data)[inst].node; 2315 const src: LazySrcLoc = .{ .node_offset = src_node }; 2316 sema.src = src; 2317 2318 if (block.is_comptime) { 2319 return sema.addConstant( 2320 inferred_alloc_ty, 2321 try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined), 2322 ); 2323 } 2324 2325 // 
`Sema.addConstant` does not add the instruction to the block because it is 2326 // not needed in the case of constant values. However here, we plan to "downgrade" 2327 // to a normal instruction when we hit `resolve_inferred_alloc`. So we append 2328 // to the block even though it is currently a `.constant`. 2329 const result = try sema.addConstant( 2330 inferred_alloc_ty, 2331 try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = 0 }), 2332 ); 2333 try sema.requireFunctionBlock(block, src); 2334 try block.instructions.append(sema.gpa, Air.refToIndex(result).?); 2335 return result; 2336} 2337 2338fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2339 const tracy = trace(@src()); 2340 defer tracy.end(); 2341 2342 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2343 const src = inst_data.src(); 2344 const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; 2345 const ptr = sema.resolveInst(inst_data.operand); 2346 const ptr_inst = Air.refToIndex(ptr).?; 2347 assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); 2348 const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload; 2349 const ptr_val = sema.air_values.items[value_index]; 2350 const var_is_mut = switch (sema.typeOf(ptr).tag()) { 2351 .inferred_alloc_const => false, 2352 .inferred_alloc_mut => true, 2353 else => unreachable, 2354 }; 2355 const target = sema.mod.getTarget(); 2356 2357 switch (ptr_val.tag()) { 2358 .inferred_alloc_comptime => { 2359 const iac = ptr_val.castTag(.inferred_alloc_comptime).?; 2360 const decl = iac.data.decl; 2361 try sema.mod.declareDeclDependency(sema.owner_decl, decl); 2362 2363 const final_elem_ty = try decl.ty.copy(sema.arena); 2364 const final_ptr_ty = try Type.ptr(sema.arena, .{ 2365 .pointee_type = final_elem_ty, 2366 .@"align" = iac.data.alignment, 2367 .@"addrspace" = target_util.defaultAddressSpace(target, .local), 2368 }); 2369 const final_ptr_ty_inst = try sema.addType(final_ptr_ty); 2370 sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst; 2371 2372 if (var_is_mut) { 2373 sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{ 2374 .decl = decl, 2375 .runtime_index = block.runtime_index, 2376 }); 2377 } else { 2378 sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl); 2379 } 2380 }, 2381 .inferred_alloc => { 2382 const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; 2383 const peer_inst_list = inferred_alloc.data.stored_inst_list.items; 2384 const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none); 2385 2386 try sema.requireRuntimeBlock(block, src); 2387 try sema.resolveTypeLayout(block, ty_src, final_elem_ty); 2388 2389 if (var_is_mut) { 2390 try sema.validateVarType(block, ty_src, final_elem_ty, false); 2391 } 2392 // Change it to a normal alloc. 
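            // That is, the placeholder `.constant` instruction is rewritten in place into
            // an `.alloc` whose pointer type carries the peer-resolved element type and
            // the requested alignment.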
2393 const final_ptr_ty = try Type.ptr(sema.arena, .{ 2394 .pointee_type = final_elem_ty, 2395 .@"align" = inferred_alloc.data.alignment, 2396 .@"addrspace" = target_util.defaultAddressSpace(target, .local), 2397 }); 2398 sema.air_instructions.set(ptr_inst, .{ 2399 .tag = .alloc, 2400 .data = .{ .ty = final_ptr_ty }, 2401 }); 2402 }, 2403 else => unreachable, 2404 } 2405} 2406 2407fn zirValidateStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2408 const tracy = trace(@src()); 2409 defer tracy.end(); 2410 2411 const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; 2412 const init_src = validate_inst.src(); 2413 const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); 2414 const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len]; 2415 const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; 2416 const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; 2417 const object_ptr = sema.resolveInst(field_ptr_extra.lhs); 2418 const agg_ty = sema.typeOf(object_ptr).childType(); 2419 switch (agg_ty.zigTypeTag()) { 2420 .Struct => return sema.validateStructInit( 2421 block, 2422 agg_ty.castTag(.@"struct").?.data, 2423 init_src, 2424 instrs, 2425 ), 2426 .Union => return sema.validateUnionInit( 2427 block, 2428 agg_ty.cast(Type.Payload.Union).?.data, 2429 init_src, 2430 instrs, 2431 object_ptr, 2432 ), 2433 else => unreachable, 2434 } 2435} 2436 2437fn validateUnionInit( 2438 sema: *Sema, 2439 block: *Block, 2440 union_obj: *Module.Union, 2441 init_src: LazySrcLoc, 2442 instrs: []const Zir.Inst.Index, 2443 union_ptr: Air.Inst.Ref, 2444) CompileError!void { 2445 if (instrs.len != 1) { 2446 // TODO add note for other field 2447 // TODO add note for union declared here 2448 return sema.fail(block, init_src, "only one union field can be active at once", .{}); 2449 } 2450 2451 const field_ptr = instrs[0]; 2452 const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; 2453 const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_ptr_data.src_node }; 2454 const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; 2455 const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); 2456 const field_index_big = union_obj.fields.getIndex(field_name) orelse 2457 return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); 2458 const field_index = @intCast(u32, field_index_big); 2459 2460 // Handle the possibility of the union value being comptime-known. 2461 const union_ptr_inst = Air.refToIndex(sema.resolveInst(field_ptr_extra.lhs)).?; 2462 switch (sema.air_instructions.items(.tag)[union_ptr_inst]) { 2463 .constant => return, // In this case the tag has already been set. No validation to do. 2464 .bitcast => { 2465 // TODO here we need to go back and see if we need to convert the union 2466 // to a comptime-known value. In such case, we must delete all the instructions 2467 // added to the current block starting with the bitcast. 2468 // If the bitcast result ptr is an alloc, the alloc should be replaced with 2469 // a constant decl_ref. 2470 // Otherwise, the bitcast should be preserved and a store instruction should be 2471 // emitted to store the constant union value through the bitcast. 
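            // Until that TODO is implemented, control simply continues past this switch
            // and the union tag is set at runtime below.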
2472 }, 2473 else => |t| { 2474 if (std.debug.runtime_safety) { 2475 std.debug.panic("unexpected AIR tag for union pointer: {s}", .{@tagName(t)}); 2476 } else { 2477 unreachable; 2478 } 2479 }, 2480 } 2481 2482 // Otherwise, we set the new union tag now. 2483 const new_tag = try sema.addConstant( 2484 union_obj.tag_ty, 2485 try Value.Tag.enum_field_index.create(sema.arena, field_index), 2486 ); 2487 2488 try sema.requireRuntimeBlock(block, init_src); 2489 _ = try block.addBinOp(.set_union_tag, union_ptr, new_tag); 2490} 2491 2492fn validateStructInit( 2493 sema: *Sema, 2494 block: *Block, 2495 struct_obj: *Module.Struct, 2496 init_src: LazySrcLoc, 2497 instrs: []const Zir.Inst.Index, 2498) CompileError!void { 2499 const gpa = sema.gpa; 2500 2501 // Maps field index to field_ptr index of where it was already initialized. 2502 const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count()); 2503 defer gpa.free(found_fields); 2504 mem.set(Zir.Inst.Index, found_fields, 0); 2505 2506 for (instrs) |field_ptr| { 2507 const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; 2508 const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_ptr_data.src_node }; 2509 const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; 2510 const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); 2511 const field_index = struct_obj.fields.getIndex(field_name) orelse 2512 return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); 2513 if (found_fields[field_index] != 0) { 2514 const other_field_ptr = found_fields[field_index]; 2515 const other_field_ptr_data = sema.code.instructions.items(.data)[other_field_ptr].pl_node; 2516 const other_field_src: LazySrcLoc = .{ .node_offset_back2tok = other_field_ptr_data.src_node }; 2517 const msg = msg: { 2518 const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); 2519 errdefer msg.destroy(gpa); 2520 try sema.errNote(block, other_field_src, msg, "other field here", .{}); 2521 break :msg msg; 2522 }; 2523 return sema.failWithOwnedErrorMsg(msg); 2524 } 2525 found_fields[field_index] = field_ptr; 2526 } 2527 2528 var root_msg: ?*Module.ErrorMsg = null; 2529 2530 // TODO handle default struct field values 2531 for (found_fields) |field_ptr, i| { 2532 if (field_ptr != 0) continue; 2533 2534 const field_name = struct_obj.fields.keys()[i]; 2535 const template = "missing struct field: {s}"; 2536 const args = .{field_name}; 2537 if (root_msg) |msg| { 2538 try sema.errNote(block, init_src, msg, template, args); 2539 } else { 2540 root_msg = try sema.errMsg(block, init_src, template, args); 2541 } 2542 } 2543 if (root_msg) |msg| { 2544 const fqn = try struct_obj.getFullyQualifiedName(gpa); 2545 defer gpa.free(fqn); 2546 try sema.mod.errNoteNonLazy( 2547 struct_obj.srcLoc(), 2548 msg, 2549 "struct '{s}' declared here", 2550 .{fqn}, 2551 ); 2552 return sema.failWithOwnedErrorMsg(msg); 2553 } 2554} 2555 2556fn zirValidateArrayInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2557 const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; 2558 const init_src = validate_inst.src(); 2559 const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); 2560 const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len]; 2561 const elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; 2562 const elem_ptr_extra = 
sema.code.extraData(Zir.Inst.ElemPtrImm, elem_ptr_data.payload_index).data; 2563 const array_ptr = sema.resolveInst(elem_ptr_extra.ptr); 2564 const array_ty = sema.typeOf(array_ptr).childType(); 2565 const array_len = array_ty.arrayLen(); 2566 2567 if (instrs.len != array_len) { 2568 return sema.fail(block, init_src, "expected {d} array elements; found {d}", .{ 2569 array_len, instrs.len, 2570 }); 2571 } 2572} 2573 2574fn failWithBadMemberAccess( 2575 sema: *Sema, 2576 block: *Block, 2577 agg_ty: Type, 2578 field_src: LazySrcLoc, 2579 field_name: []const u8, 2580) CompileError { 2581 const kw_name = switch (agg_ty.zigTypeTag()) { 2582 .Union => "union", 2583 .Struct => "struct", 2584 .Opaque => "opaque", 2585 .Enum => "enum", 2586 else => unreachable, 2587 }; 2588 const msg = msg: { 2589 const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{ 2590 kw_name, agg_ty, field_name, 2591 }); 2592 errdefer msg.destroy(sema.gpa); 2593 try sema.addDeclaredHereNote(msg, agg_ty); 2594 break :msg msg; 2595 }; 2596 return sema.failWithOwnedErrorMsg(msg); 2597} 2598 2599fn failWithBadStructFieldAccess( 2600 sema: *Sema, 2601 block: *Block, 2602 struct_obj: *Module.Struct, 2603 field_src: LazySrcLoc, 2604 field_name: []const u8, 2605) CompileError { 2606 const gpa = sema.gpa; 2607 2608 const fqn = try struct_obj.getFullyQualifiedName(gpa); 2609 defer gpa.free(fqn); 2610 2611 const msg = msg: { 2612 const msg = try sema.errMsg( 2613 block, 2614 field_src, 2615 "no field named '{s}' in struct '{s}'", 2616 .{ field_name, fqn }, 2617 ); 2618 errdefer msg.destroy(gpa); 2619 try sema.mod.errNoteNonLazy(struct_obj.srcLoc(), msg, "struct declared here", .{}); 2620 break :msg msg; 2621 }; 2622 return sema.failWithOwnedErrorMsg(msg); 2623} 2624 2625fn failWithBadUnionFieldAccess( 2626 sema: *Sema, 2627 block: *Block, 2628 union_obj: *Module.Union, 2629 field_src: LazySrcLoc, 2630 field_name: []const u8, 2631) CompileError { 2632 const gpa = sema.gpa; 2633 2634 const fqn = try union_obj.getFullyQualifiedName(gpa); 2635 defer gpa.free(fqn); 2636 2637 const msg = msg: { 2638 const msg = try sema.errMsg( 2639 block, 2640 field_src, 2641 "no field named '{s}' in union '{s}'", 2642 .{ field_name, fqn }, 2643 ); 2644 errdefer msg.destroy(gpa); 2645 try sema.mod.errNoteNonLazy(union_obj.srcLoc(), msg, "union declared here", .{}); 2646 break :msg msg; 2647 }; 2648 return sema.failWithOwnedErrorMsg(msg); 2649} 2650 2651fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { 2652 const src_loc = decl_ty.declSrcLocOrNull() orelse return; 2653 const category = switch (decl_ty.zigTypeTag()) { 2654 .Union => "union", 2655 .Struct => "struct", 2656 .Enum => "enum", 2657 .Opaque => "opaque", 2658 .ErrorSet => "error set", 2659 else => unreachable, 2660 }; 2661 try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); 2662} 2663 2664fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2665 const tracy = trace(@src()); 2666 defer tracy.end(); 2667 2668 const bin_inst = sema.code.instructions.items(.data)[inst].bin; 2669 if (bin_inst.lhs == .none) { 2670 // This is an elided instruction, but AstGen was not smart enough 2671 // to omit it. 
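        // Nothing to store through, so this is treated as a no-op.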
2672 return; 2673 } 2674 const ptr = sema.resolveInst(bin_inst.lhs); 2675 const value = sema.resolveInst(bin_inst.rhs); 2676 const ptr_ty = try Type.ptr(sema.arena, .{ 2677 .pointee_type = sema.typeOf(value), 2678 // TODO figure out which address space is appropriate here 2679 .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), 2680 }); 2681 // TODO detect when this store should be done at compile-time. For example, 2682 // if expressions should force it when the condition is compile-time known. 2683 const src: LazySrcLoc = .unneeded; 2684 try sema.requireRuntimeBlock(block, src); 2685 const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); 2686 return sema.storePtr(block, src, bitcasted_ptr, value); 2687} 2688 2689fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2690 const tracy = trace(@src()); 2691 defer tracy.end(); 2692 2693 const src: LazySrcLoc = sema.src; 2694 const bin_inst = sema.code.instructions.items(.data)[inst].bin; 2695 const ptr = sema.resolveInst(bin_inst.lhs); 2696 const operand = sema.resolveInst(bin_inst.rhs); 2697 const operand_ty = sema.typeOf(operand); 2698 const ptr_inst = Air.refToIndex(ptr).?; 2699 assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); 2700 const air_datas = sema.air_instructions.items(.data); 2701 const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; 2702 2703 if (ptr_val.castTag(.inferred_alloc_comptime)) |iac| { 2704 // There will be only one store_to_inferred_ptr because we are running at comptime. 2705 // The alloc will turn into a Decl. 2706 if (try sema.resolveMaybeUndefValAllowVariables(block, src, operand)) |operand_val| { 2707 if (operand_val.tag() == .variable) { 2708 return sema.failWithNeededComptime(block, src); 2709 } 2710 var anon_decl = try block.startAnonDecl(); 2711 defer anon_decl.deinit(); 2712 iac.data.decl = try anon_decl.finish( 2713 try operand_ty.copy(anon_decl.arena()), 2714 try operand_val.copy(anon_decl.arena()), 2715 ); 2716 // TODO set the alignment on the decl 2717 return; 2718 } else { 2719 return sema.failWithNeededComptime(block, src); 2720 } 2721 } 2722 2723 if (ptr_val.castTag(.inferred_alloc)) |inferred_alloc| { 2724 // Add the stored instruction to the set we will use to resolve peer types 2725 // for the inferred allocation. 2726 try inferred_alloc.data.stored_inst_list.append(sema.arena, operand); 2727 // Create a runtime bitcast instruction with exactly the type the pointer wants. 
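        // (The inferred alloc's own type is still a placeholder `inferred_alloc_*` type
        // at this point, so the store must go through a correctly typed bitcast of the
        // pointer.)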
2728 const ptr_ty = try Type.ptr(sema.arena, .{ 2729 .pointee_type = operand_ty, 2730 .@"align" = inferred_alloc.data.alignment, 2731 .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), 2732 }); 2733 const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); 2734 return sema.storePtr(block, src, bitcasted_ptr, operand); 2735 } 2736 unreachable; 2737} 2738 2739fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2740 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2741 const src = inst_data.src(); 2742 const quota = try sema.resolveAlreadyCoercedInt(block, src, inst_data.operand, u32); 2743 if (sema.branch_quota < quota) 2744 sema.branch_quota = quota; 2745} 2746 2747fn zirStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2748 const tracy = trace(@src()); 2749 defer tracy.end(); 2750 2751 const bin_inst = sema.code.instructions.items(.data)[inst].bin; 2752 const ptr = sema.resolveInst(bin_inst.lhs); 2753 const value = sema.resolveInst(bin_inst.rhs); 2754 return sema.storePtr(block, sema.src, ptr, value); 2755} 2756 2757fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 2758 const tracy = trace(@src()); 2759 defer tracy.end(); 2760 2761 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 2762 const src = inst_data.src(); 2763 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 2764 const ptr = sema.resolveInst(extra.lhs); 2765 const value = sema.resolveInst(extra.rhs); 2766 return sema.storePtr(block, src, ptr, value); 2767} 2768 2769fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2770 const tracy = trace(@src()); 2771 defer tracy.end(); 2772 2773 const zir_bytes = sema.code.instructions.items(.data)[inst].str.get(sema.code); 2774 2775 // `zir_bytes` references memory inside the ZIR module, which can get deallocated 2776 // after semantic analysis is complete, for example in the case of the initialization 2777 // expression of a variable declaration. We need the memory to be in the new 2778 // anonymous Decl's arena. 2779 2780 var anon_decl = try block.startAnonDecl(); 2781 defer anon_decl.deinit(); 2782 2783 const bytes = try anon_decl.arena().dupeZ(u8, zir_bytes); 2784 2785 const new_decl = try anon_decl.finish( 2786 try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), 2787 try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 2788 ); 2789 2790 return sema.analyzeDeclRef(new_decl); 2791} 2792 2793fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2794 _ = block; 2795 const tracy = trace(@src()); 2796 defer tracy.end(); 2797 2798 const int = sema.code.instructions.items(.data)[inst].int; 2799 return sema.addIntUnsigned(Type.initTag(.comptime_int), int); 2800} 2801 2802fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2803 _ = block; 2804 const tracy = trace(@src()); 2805 defer tracy.end(); 2806 2807 const arena = sema.arena; 2808 const int = sema.code.instructions.items(.data)[inst].str; 2809 const byte_count = int.len * @sizeOf(std.math.big.Limb); 2810 const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count]; 2811 const limbs = try arena.alloc(std.math.big.Limb, int.len); 2812 mem.copy(u8, mem.sliceAsBytes(limbs), limb_bytes); 2813 2814 return sema.addConstant( 2815 Type.initTag(.comptime_int), 2816 try Value.Tag.int_big_positive.create(arena, limbs), 2817 ); 2818} 2819 2820fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2821 _ = block; 2822 const arena = sema.arena; 2823 const number = sema.code.instructions.items(.data)[inst].float; 2824 return sema.addConstant( 2825 Type.initTag(.comptime_float), 2826 try Value.Tag.float_64.create(arena, number), 2827 ); 2828} 2829 2830fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2831 _ = block; 2832 const arena = sema.arena; 2833 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 2834 const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; 2835 const number = extra.get(); 2836 return sema.addConstant( 2837 Type.initTag(.comptime_float), 2838 try Value.Tag.float_128.create(arena, number), 2839 ); 2840} 2841 2842fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { 2843 const tracy = trace(@src()); 2844 defer tracy.end(); 2845 2846 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2847 const src = inst_data.src(); 2848 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 2849 const msg = try sema.resolveConstString(block, operand_src, inst_data.operand); 2850 return sema.fail(block, src, "{s}", .{msg}); 2851} 2852 2853fn zirCompileLog( 2854 sema: *Sema, 2855 block: *Block, 2856 extended: Zir.Inst.Extended.InstData, 2857) CompileError!Air.Inst.Ref { 2858 var managed = sema.mod.compile_log_text.toManaged(sema.gpa); 2859 defer sema.mod.compile_log_text = managed.moveToUnmanaged(); 2860 const writer = managed.writer(); 2861 2862 const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand); 2863 const src_node = extra.data.src_node; 2864 const src: LazySrcLoc = .{ .node_offset = src_node }; 2865 const args = sema.code.refSlice(extra.end, extended.small); 2866 2867 for (args) |arg_ref, i| { 2868 if (i != 0) try writer.print(", ", .{}); 2869 2870 const arg = sema.resolveInst(arg_ref); 2871 const arg_ty = sema.typeOf(arg); 2872 if (try sema.resolveMaybeUndefVal(block, src, arg)) |val| { 2873 try writer.print("@as({}, {})", .{ arg_ty, val }); 2874 } else { 2875 try writer.print("@as({}, [runtime value])", .{arg_ty}); 2876 } 2877 } 2878 try writer.print("\n", .{}); 2879 2880 const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, sema.owner_decl); 2881 if (!gop.found_existing) { 2882 gop.value_ptr.* = src_node; 2883 } 2884 return 
Air.Inst.Ref.void_value; 2885} 2886 2887fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { 2888 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 2889 const src: LazySrcLoc = inst_data.src(); 2890 const msg_inst = sema.resolveInst(inst_data.operand); 2891 2892 return sema.panicWithMsg(block, src, msg_inst); 2893} 2894 2895fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2896 const tracy = trace(@src()); 2897 defer tracy.end(); 2898 2899 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 2900 const src = inst_data.src(); 2901 const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); 2902 const body = sema.code.extra[extra.end..][0..extra.data.body_len]; 2903 const gpa = sema.gpa; 2904 2905 // AIR expects a block outside the loop block too. 2906 // Reserve space for a Loop instruction so that generated Break instructions can 2907 // point to it, even if it doesn't end up getting used because the code ends up being 2908 // comptime evaluated. 2909 const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); 2910 const loop_inst = block_inst + 1; 2911 try sema.air_instructions.ensureUnusedCapacity(gpa, 2); 2912 sema.air_instructions.appendAssumeCapacity(.{ 2913 .tag = .block, 2914 .data = undefined, 2915 }); 2916 sema.air_instructions.appendAssumeCapacity(.{ 2917 .tag = .loop, 2918 .data = .{ .ty_pl = .{ 2919 .ty = .noreturn_type, 2920 .payload = undefined, 2921 } }, 2922 }); 2923 var label: Block.Label = .{ 2924 .zir_block = inst, 2925 .merges = .{ 2926 .results = .{}, 2927 .br_list = .{}, 2928 .block_inst = block_inst, 2929 }, 2930 }; 2931 var child_block = parent_block.makeSubBlock(); 2932 child_block.label = &label; 2933 child_block.runtime_cond = null; 2934 child_block.runtime_loop = src; 2935 child_block.runtime_index += 1; 2936 const merges = &child_block.label.?.merges; 2937 2938 defer child_block.instructions.deinit(gpa); 2939 defer merges.results.deinit(gpa); 2940 defer merges.br_list.deinit(gpa); 2941 2942 var loop_block = child_block.makeSubBlock(); 2943 defer loop_block.instructions.deinit(gpa); 2944 2945 _ = try sema.analyzeBody(&loop_block, body); 2946 2947 try child_block.instructions.append(gpa, loop_inst); 2948 2949 try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + 2950 loop_block.instructions.items.len); 2951 sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity( 2952 Air.Block{ .body_len = @intCast(u32, loop_block.instructions.items.len) }, 2953 ); 2954 sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items); 2955 return sema.analyzeBlockBody(parent_block, src, &child_block, merges); 2956} 2957 2958fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 2959 const tracy = trace(@src()); 2960 defer tracy.end(); 2961 2962 const pl_node = sema.code.instructions.items(.data)[inst].pl_node; 2963 const src = pl_node.src(); 2964 const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index); 2965 const body = sema.code.extra[extra.end..][0..extra.data.body_len]; 2966 2967 // we check this here to avoid undefined symbols 2968 if (!@import("build_options").have_llvm) 2969 return sema.fail(parent_block, src, "cannot do C import on Zig compiler not built with LLVM-extension", .{}); 2970 2971 var c_import_buf = std.ArrayList(u8).init(sema.gpa); 2972 defer c_import_buf.deinit(); 2973 2974 var 
child_block: Block = .{ 2975 .parent = parent_block, 2976 .sema = sema, 2977 .src_decl = parent_block.src_decl, 2978 .namespace = parent_block.namespace, 2979 .wip_capture_scope = parent_block.wip_capture_scope, 2980 .instructions = .{}, 2981 .inlining = parent_block.inlining, 2982 .is_comptime = parent_block.is_comptime, 2983 .c_import_buf = &c_import_buf, 2984 }; 2985 defer child_block.instructions.deinit(sema.gpa); 2986 2987 _ = try sema.analyzeBody(&child_block, body); 2988 2989 const c_import_res = sema.mod.comp.cImport(c_import_buf.items) catch |err| 2990 return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); 2991 2992 if (c_import_res.errors.len != 0) { 2993 const msg = msg: { 2994 const msg = try sema.errMsg(&child_block, src, "C import failed", .{}); 2995 errdefer msg.destroy(sema.gpa); 2996 2997 if (!sema.mod.comp.bin_file.options.link_libc) 2998 try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{}); 2999 3000 for (c_import_res.errors) |_| { 3001 // TODO integrate with LazySrcLoc 3002 // try sema.mod.errNoteNonLazy(.{}, msg, "{s}", .{clang_err.msg_ptr[0..clang_err.msg_len]}); 3003 // if (clang_err.filename_ptr) |p| p[0..clang_err.filename_len] else "(no file)", 3004 // clang_err.line + 1, 3005 // clang_err.column + 1, 3006 } 3007 @import("clang.zig").Stage2ErrorMsg.delete(c_import_res.errors.ptr, c_import_res.errors.len); 3008 break :msg msg; 3009 }; 3010 return sema.failWithOwnedErrorMsg(msg); 3011 } 3012 const c_import_pkg = Package.create( 3013 sema.gpa, 3014 null, 3015 c_import_res.out_zig_path, 3016 ) catch |err| switch (err) { 3017 error.OutOfMemory => return error.OutOfMemory, 3018 else => unreachable, // we pass null for root_src_dir_path 3019 }; 3020 const std_pkg = sema.mod.main_pkg.table.get("std").?; 3021 const builtin_pkg = sema.mod.main_pkg.table.get("builtin").?; 3022 try c_import_pkg.add(sema.gpa, "builtin", builtin_pkg); 3023 try c_import_pkg.add(sema.gpa, "std", std_pkg); 3024 3025 const result = sema.mod.importPkg(c_import_pkg) catch |err| 3026 return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); 3027 3028 sema.mod.astGenFile(result.file) catch |err| 3029 return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); 3030 3031 try sema.mod.semaFile(result.file); 3032 const file_root_decl = result.file.root_decl.?; 3033 try sema.mod.declareDeclDependency(sema.owner_decl, file_root_decl); 3034 return sema.addConstant(file_root_decl.ty, file_root_decl.val); 3035} 3036 3037fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 3038 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 3039 const src = inst_data.src(); 3040 return sema.fail(parent_block, src, "TODO: implement Sema.zirSuspendBlock", .{}); 3041} 3042 3043fn zirBlock( 3044 sema: *Sema, 3045 parent_block: *Block, 3046 inst: Zir.Inst.Index, 3047) CompileError!Air.Inst.Ref { 3048 const tracy = trace(@src()); 3049 defer tracy.end(); 3050 3051 const pl_node = sema.code.instructions.items(.data)[inst].pl_node; 3052 const src = pl_node.src(); 3053 const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index); 3054 const body = sema.code.extra[extra.end..][0..extra.data.body_len]; 3055 const gpa = sema.gpa; 3056 3057 // Reserve space for a Block instruction so that generated Break instructions can 3058 // point to it, even if it doesn't end up getting used because the code ends up being 3059 // comptime 
    // evaluated.
    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = undefined,
    });

    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };

    var child_block: Block = .{
        .parent = parent_block,
        .sema = sema,
        .src_decl = parent_block.src_decl,
        .namespace = parent_block.namespace,
        .wip_capture_scope = parent_block.wip_capture_scope,
        .instructions = .{},
        .label = &label,
        .inlining = parent_block.inlining,
        .is_comptime = parent_block.is_comptime,
    };
    const merges = &child_block.label.?.merges;

    defer child_block.instructions.deinit(gpa);
    defer merges.results.deinit(gpa);
    defer merges.br_list.deinit(gpa);

    _ = try sema.analyzeBody(&child_block, body);

    return sema.analyzeBlockBody(parent_block, src, &child_block, merges);
}

fn resolveBlockBody(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    child_block: *Block,
    body: []const Zir.Inst.Index,
    merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
    if (child_block.is_comptime) {
        return sema.resolveBody(child_block, body);
    } else {
        _ = try sema.analyzeBody(child_block, body);
        return sema.analyzeBlockBody(parent_block, src, child_block, merges);
    }
}

fn analyzeBlockBody(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    child_block: *Block,
    merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = sema.gpa;

    // Blocks must terminate with a noreturn instruction.
    assert(child_block.instructions.items.len != 0);
    assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn());

    if (merges.results.items.len == 0) {
        // No need for a block instruction. We can put the new instructions
        // directly into the parent block.
        try parent_block.instructions.appendSlice(gpa, child_block.instructions.items);
        return Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]);
    }
    if (merges.results.items.len == 1) {
        const last_inst_index = child_block.instructions.items.len - 1;
        const last_inst = child_block.instructions.items[last_inst_index];
        if (sema.getBreakBlock(last_inst)) |br_block| {
            if (br_block == merges.block_inst) {
                // No need for a block instruction. We can put the new instructions directly
                // into the parent block. Here we omit the break instruction.
                const without_break = child_block.instructions.items[0..last_inst_index];
                try parent_block.instructions.appendSlice(gpa, without_break);
                return merges.results.items[0];
            }
        }
    }
    // It is impossible to have the number of results be > 1 in a comptime scope.
    assert(!child_block.is_comptime); // Should have already gotten a compile error in the condbr condition.

    // Need to set the type and emit the Block instruction. This allows machine code generation
    // to emit a jump instruction to after the block when it encounters the break.
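    // The block's result type is the peer-resolved type of all break operands; any
    // break operand that does not already have that type is coerced below.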
3153 try parent_block.instructions.append(gpa, merges.block_inst); 3154 const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items, .none); 3155 const ty_inst = try sema.addType(resolved_ty); 3156 try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + 3157 child_block.instructions.items.len); 3158 sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{ 3159 .ty = ty_inst, 3160 .payload = sema.addExtraAssumeCapacity(Air.Block{ 3161 .body_len = @intCast(u32, child_block.instructions.items.len), 3162 }), 3163 } }; 3164 sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); 3165 // Now that the block has its type resolved, we need to go back into all the break 3166 // instructions, and insert type coercion on the operands. 3167 for (merges.br_list.items) |br| { 3168 const br_operand = sema.air_instructions.items(.data)[br].br.operand; 3169 const br_operand_src = src; 3170 const br_operand_ty = sema.typeOf(br_operand); 3171 if (br_operand_ty.eql(resolved_ty)) { 3172 // No type coercion needed. 3173 continue; 3174 } 3175 var coerce_block = parent_block.makeSubBlock(); 3176 defer coerce_block.instructions.deinit(gpa); 3177 const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src); 3178 // If no instructions were produced, such as in the case of a coercion of a 3179 // constant value to a new type, we can simply point the br operand to it. 3180 if (coerce_block.instructions.items.len == 0) { 3181 sema.air_instructions.items(.data)[br].br.operand = coerced_operand; 3182 continue; 3183 } 3184 assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == 3185 Air.refToIndex(coerced_operand).?); 3186 3187 // Convert the br instruction to a block instruction that has the coercion 3188 // and then a new br inside that returns the coerced instruction. 
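        // Schematically (a sketch only, not literal AIR syntax):
        //
        //     before:  %br = br(%target_block, %operand)
        //     after:   %br = block(noreturn, {
        //                  ...coercion instructions...
        //                  br(%target_block, %coerced_operand)
        //              })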
3189 const sub_block_len = @intCast(u32, coerce_block.instructions.items.len + 1); 3190 try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + 3191 sub_block_len); 3192 try sema.air_instructions.ensureUnusedCapacity(gpa, 1); 3193 const sub_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); 3194 3195 sema.air_instructions.items(.tag)[br] = .block; 3196 sema.air_instructions.items(.data)[br] = .{ .ty_pl = .{ 3197 .ty = Air.Inst.Ref.noreturn_type, 3198 .payload = sema.addExtraAssumeCapacity(Air.Block{ 3199 .body_len = sub_block_len, 3200 }), 3201 } }; 3202 sema.air_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); 3203 sema.air_extra.appendAssumeCapacity(sub_br_inst); 3204 3205 sema.air_instructions.appendAssumeCapacity(.{ 3206 .tag = .br, 3207 .data = .{ .br = .{ 3208 .block_inst = merges.block_inst, 3209 .operand = coerced_operand, 3210 } }, 3211 }); 3212 } 3213 return Air.indexToRef(merges.block_inst); 3214} 3215 3216fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 3217 const tracy = trace(@src()); 3218 defer tracy.end(); 3219 3220 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 3221 const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; 3222 const src = inst_data.src(); 3223 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 3224 const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 3225 const decl_name = sema.code.nullTerminatedString(extra.decl_name); 3226 if (extra.namespace != .none) { 3227 return sema.fail(block, src, "TODO: implement exporting with field access", .{}); 3228 } 3229 const decl = try sema.lookupIdentifier(block, operand_src, decl_name); 3230 const options = try sema.resolveExportOptions(block, options_src, extra.options); 3231 try sema.analyzeExport(block, src, options, decl); 3232} 3233 3234fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 3235 const tracy = trace(@src()); 3236 defer tracy.end(); 3237 3238 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 3239 const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data; 3240 const src = inst_data.src(); 3241 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 3242 const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 3243 const operand = try sema.resolveInstConst(block, operand_src, extra.operand); 3244 const options = try sema.resolveExportOptions(block, options_src, extra.options); 3245 const decl = switch (operand.val.tag()) { 3246 .function => operand.val.castTag(.function).?.data.owner_decl, 3247 else => return sema.fail(block, operand_src, "TODO implement exporting arbitrary Value objects", .{}), // TODO put this Value into an anonymous Decl and then export it. 
3248 }; 3249 try sema.analyzeExport(block, src, options, decl); 3250} 3251 3252pub fn analyzeExport( 3253 sema: *Sema, 3254 block: *Block, 3255 src: LazySrcLoc, 3256 borrowed_options: std.builtin.ExportOptions, 3257 exported_decl: *Decl, 3258) !void { 3259 const Export = Module.Export; 3260 const mod = sema.mod; 3261 3262 try mod.ensureDeclAnalyzed(exported_decl); 3263 // TODO run the same checks as we do for C ABI struct fields 3264 switch (exported_decl.ty.zigTypeTag()) { 3265 .Fn, .Int, .Struct, .Array, .Float => {}, 3266 else => return sema.fail(block, src, "unable to export type '{}'", .{exported_decl.ty}), 3267 } 3268 3269 const gpa = mod.gpa; 3270 3271 try mod.decl_exports.ensureUnusedCapacity(gpa, 1); 3272 try mod.export_owners.ensureUnusedCapacity(gpa, 1); 3273 3274 const new_export = try gpa.create(Export); 3275 errdefer gpa.destroy(new_export); 3276 3277 const symbol_name = try gpa.dupe(u8, borrowed_options.name); 3278 errdefer gpa.free(symbol_name); 3279 3280 const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null; 3281 errdefer if (section) |s| gpa.free(s); 3282 3283 const src_decl = block.src_decl; 3284 const owner_decl = sema.owner_decl; 3285 3286 log.debug("exporting Decl '{s}' as symbol '{s}' from Decl '{s}'", .{ 3287 exported_decl.name, symbol_name, owner_decl.name, 3288 }); 3289 3290 new_export.* = .{ 3291 .options = .{ 3292 .name = symbol_name, 3293 .linkage = borrowed_options.linkage, 3294 .section = section, 3295 }, 3296 .src = src, 3297 .link = switch (mod.comp.bin_file.tag) { 3298 .coff => .{ .coff = {} }, 3299 .elf => .{ .elf = .{} }, 3300 .macho => .{ .macho = .{} }, 3301 .plan9 => .{ .plan9 = null }, 3302 .c => .{ .c = {} }, 3303 .wasm => .{ .wasm = {} }, 3304 .spirv => .{ .spirv = {} }, 3305 }, 3306 .owner_decl = owner_decl, 3307 .src_decl = src_decl, 3308 .exported_decl = exported_decl, 3309 .status = .in_progress, 3310 }; 3311 3312 // Add to export_owners table. 3313 const eo_gop = mod.export_owners.getOrPutAssumeCapacity(owner_decl); 3314 if (!eo_gop.found_existing) { 3315 eo_gop.value_ptr.* = &[0]*Export{}; 3316 } 3317 eo_gop.value_ptr.* = try gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1); 3318 eo_gop.value_ptr.*[eo_gop.value_ptr.len - 1] = new_export; 3319 errdefer eo_gop.value_ptr.* = gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1); 3320 3321 // Add to exported_decl table. 
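    // Like `export_owners` above, `decl_exports` maps to a slice of `*Export`, but it is
    // keyed by the Decl being exported rather than by the Decl whose analysis produced
    // the export. As a hedged illustration (user code, not part of this file; `foo` is a
    // hypothetical function), both tables gain an entry for:
    //
    //     comptime {
    //         @export(foo, .{ .name = "foo" });
    //     }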
3322 const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl); 3323 if (!de_gop.found_existing) { 3324 de_gop.value_ptr.* = &[0]*Export{}; 3325 } 3326 de_gop.value_ptr.* = try gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1); 3327 de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export; 3328 errdefer de_gop.value_ptr.* = gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1); 3329} 3330 3331fn zirSetAlignStack(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 3332 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 3333 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 3334 const src: LazySrcLoc = inst_data.src(); 3335 const alignment = try sema.resolveAlign(block, operand_src, inst_data.operand); 3336 if (alignment > 256) { 3337 return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{ 3338 alignment, 3339 }); 3340 } 3341 const func = sema.owner_func orelse 3342 return sema.fail(block, src, "@setAlignStack outside function body", .{}); 3343 3344 switch (func.owner_decl.ty.fnCallingConvention()) { 3345 .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), 3346 .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), 3347 else => {}, 3348 } 3349 3350 const gop = try sema.mod.align_stack_fns.getOrPut(sema.mod.gpa, func); 3351 if (gop.found_existing) { 3352 const msg = msg: { 3353 const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); 3354 errdefer msg.destroy(sema.gpa); 3355 try sema.errNote(block, src, msg, "other instance here", .{}); 3356 break :msg msg; 3357 }; 3358 return sema.failWithOwnedErrorMsg(msg); 3359 } 3360 gop.value_ptr.* = .{ .alignment = alignment, .src = src }; 3361} 3362 3363fn zirSetCold(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 3364 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 3365 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 3366 const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand); 3367 const func = sema.func orelse return; // does nothing outside a function 3368 func.is_cold = is_cold; 3369} 3370 3371fn zirSetFloatMode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 3372 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 3373 const src: LazySrcLoc = inst_data.src(); 3374 return sema.fail(block, src, "TODO: implement Sema.zirSetFloatMode", .{}); 3375} 3376 3377fn zirSetRuntimeSafety(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 3378 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 3379 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 3380 block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand); 3381} 3382 3383fn zirFence(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 3384 if (block.is_comptime) return; 3385 3386 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 3387 const order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 3388 const order = try sema.resolveAtomicOrder(block, order_src, inst_data.operand); 3389 3390 if (@enumToInt(order) < @enumToInt(std.builtin.AtomicOrder.Acquire)) { 3391 return sema.fail(block, order_src, "atomic ordering must be Acquire or stricter", .{}); 3392 } 3393 3394 _ = try 
block.addInst(.{ 3395 .tag = .fence, 3396 .data = .{ .fence = order }, 3397 }); 3398} 3399 3400fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { 3401 const tracy = trace(@src()); 3402 defer tracy.end(); 3403 3404 const inst_data = sema.code.instructions.items(.data)[inst].@"break"; 3405 const operand = sema.resolveInst(inst_data.operand); 3406 const zir_block = inst_data.block_inst; 3407 3408 var block = start_block; 3409 while (true) { 3410 if (block.label) |label| { 3411 if (label.zir_block == zir_block) { 3412 const br_ref = try start_block.addBr(label.merges.block_inst, operand); 3413 try label.merges.results.append(sema.gpa, operand); 3414 try label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?); 3415 return inst; 3416 } 3417 } 3418 block = block.parent.?; 3419 } 3420} 3421 3422fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 3423 const tracy = trace(@src()); 3424 defer tracy.end(); 3425 3426 // We do not set sema.src here because dbg_stmt instructions are only emitted for 3427 // ZIR code that possibly will need to generate runtime code. So error messages 3428 // and other source locations must not rely on sema.src being set from dbg_stmt 3429 // instructions. 3430 if (block.is_comptime) return; 3431 3432 const inst_data = sema.code.instructions.items(.data)[inst].dbg_stmt; 3433 _ = try block.addInst(.{ 3434 .tag = .dbg_stmt, 3435 .data = .{ .dbg_stmt = .{ 3436 .line = inst_data.line, 3437 .column = inst_data.column, 3438 } }, 3439 }); 3440} 3441 3442fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 3443 const inst_data = sema.code.instructions.items(.data)[inst].str_tok; 3444 const src = inst_data.src(); 3445 const decl_name = inst_data.get(sema.code); 3446 const decl = try sema.lookupIdentifier(block, src, decl_name); 3447 return sema.analyzeDeclRef(decl); 3448} 3449 3450fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 3451 const inst_data = sema.code.instructions.items(.data)[inst].str_tok; 3452 const src = inst_data.src(); 3453 const decl_name = inst_data.get(sema.code); 3454 const decl = try sema.lookupIdentifier(block, src, decl_name); 3455 return sema.analyzeDeclVal(block, src, decl); 3456} 3457 3458fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !*Decl { 3459 var namespace = block.namespace; 3460 while (true) { 3461 if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl| { 3462 return decl; 3463 } 3464 namespace = namespace.parent orelse break; 3465 } 3466 unreachable; // AstGen detects use of undeclared identifier errors. 3467} 3468 3469/// This looks up a member of a specific namespace. It is affected by `usingnamespace` but 3470/// only for ones in the specified namespace. 
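/// For example (illustration only), with `observe_usingnamespace` enabled, a field
/// access such as `ns.foo` can resolve through a mixed-in namespace:
///
///     const ns = struct {
///         usingnamespace struct { pub const foo = 1; };
///     };
///     comptime { _ = ns.foo; }
///
/// If more than one reachable namespace declares `foo`, the `candidates` list below
/// ends up with multiple entries and an "ambiguous reference" error is emitted.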
3471fn lookupInNamespace(
3472 sema: *Sema,
3473 block: *Block,
3474 src: LazySrcLoc,
3475 namespace: *Namespace,
3476 ident_name: []const u8,
3477 observe_usingnamespace: bool,
3478) CompileError!?*Decl {
3479 const mod = sema.mod;
3480
3481 const namespace_decl = namespace.getDecl();
3482 if (namespace_decl.analysis == .file_failure) {
3483 try mod.declareDeclDependency(sema.owner_decl, namespace_decl);
3484 return error.AnalysisFail;
3485 }
3486
3487 if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) {
3488 const src_file = block.namespace.file_scope;
3489
3490 const gpa = sema.gpa;
3491 var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{};
3492 defer checked_namespaces.deinit(gpa);
3493
3494 // Keep track of name conflicts for error notes.
3495 var candidates: std.ArrayListUnmanaged(*Decl) = .{};
3496 defer candidates.deinit(gpa);
3497
3498 try checked_namespaces.put(gpa, namespace, {});
3499 var check_i: usize = 0;
3500
3501 while (check_i < checked_namespaces.count()) : (check_i += 1) {
3502 const check_ns = checked_namespaces.keys()[check_i];
3503 if (check_ns.decls.get(ident_name)) |decl| {
3504 // Skip decls which are not marked pub and which are in a different
3505 // file than the one containing the `a.b`/`@hasDecl` syntax.
3506 if (decl.is_pub or src_file == decl.getFileScope()) {
3507 try candidates.append(gpa, decl);
3508 }
3509 }
3510 var it = check_ns.usingnamespace_set.iterator();
3511 while (it.next()) |entry| {
3512 const sub_usingnamespace_decl = entry.key_ptr.*;
3513 const sub_is_pub = entry.value_ptr.*;
3514 if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope()) {
3515 // Skip usingnamespace decls which are not marked pub and which are in
3516 // a different file than the one containing the `a.b`/`@hasDecl` syntax.
3517 continue;
3518 }
3519 try sema.ensureDeclAnalyzed(sub_usingnamespace_decl);
3520 const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data;
3521 const sub_ns = ns_ty.getNamespace().?;
3522 try checked_namespaces.put(gpa, sub_ns, {});
3523 }
3524 }
3525
3526 switch (candidates.items.len) {
3527 0 => {},
3528 1 => {
3529 const decl = candidates.items[0];
3530 try mod.declareDeclDependency(sema.owner_decl, decl);
3531 return decl;
3532 },
3533 else => {
3534 const msg = msg: {
3535 const msg = try sema.errMsg(block, src, "ambiguous reference", .{});
3536 errdefer msg.destroy(gpa);
3537 for (candidates.items) |candidate| {
3538 const src_loc = candidate.srcLoc();
3539 try mod.errNoteNonLazy(src_loc, msg, "declared here", .{});
3540 }
3541 break :msg msg;
3542 };
3543 return sema.failWithOwnedErrorMsg(msg);
3544 },
3545 }
3546 } else if (namespace.decls.get(ident_name)) |decl| {
3547 try mod.declareDeclDependency(sema.owner_decl, decl);
3548 return decl;
3549 }
3550
3551 log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{
3552 sema.owner_decl, sema.owner_decl.name, ident_name, namespace_decl, namespace_decl.name,
3553 });
3554 // TODO This dependency is too strong. Really, it should only be a dependency
3555 // on the non-existence of `ident_name` in the namespace. We can lessen the number of
3556 // outdated declarations by making this dependency more sophisticated.
3557 try mod.declareDeclDependency(sema.owner_decl, namespace_decl); 3558 return null; 3559} 3560 3561fn zirCall( 3562 sema: *Sema, 3563 block: *Block, 3564 inst: Zir.Inst.Index, 3565) CompileError!Air.Inst.Ref { 3566 const tracy = trace(@src()); 3567 defer tracy.end(); 3568 3569 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 3570 const func_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node }; 3571 const call_src = inst_data.src(); 3572 const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index); 3573 const args = sema.code.refSlice(extra.end, extra.data.flags.args_len); 3574 3575 const modifier = @intToEnum(std.builtin.CallOptions.Modifier, extra.data.flags.packed_modifier); 3576 const ensure_result_used = extra.data.flags.ensure_result_used; 3577 3578 var func = sema.resolveInst(extra.data.callee); 3579 var resolved_args: []Air.Inst.Ref = undefined; 3580 3581 const func_type = sema.typeOf(func); 3582 3583 // Desugar bound functions here 3584 if (func_type.tag() == .bound_fn) { 3585 const bound_func = try sema.resolveValue(block, func_src, func); 3586 const bound_data = &bound_func.cast(Value.Payload.BoundFn).?.data; 3587 func = bound_data.func_inst; 3588 resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len + 1); 3589 resolved_args[0] = bound_data.arg0_inst; 3590 for (args) |zir_arg, i| { 3591 resolved_args[i + 1] = sema.resolveInst(zir_arg); 3592 } 3593 } else { 3594 resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len); 3595 for (args) |zir_arg, i| { 3596 resolved_args[i] = sema.resolveInst(zir_arg); 3597 } 3598 } 3599 3600 return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args); 3601} 3602 3603const GenericCallAdapter = struct { 3604 generic_fn: *Module.Fn, 3605 precomputed_hash: u64, 3606 func_ty_info: Type.Payload.Function.Data, 3607 comptime_tvs: []const TypedValue, 3608 3609 pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool { 3610 _ = adapted_key; 3611 // The generic function Decl is guaranteed to be the first dependency 3612 // of each of its instantiations. 3613 const generic_owner_decl = other_key.owner_decl.dependencies.keys()[0]; 3614 if (ctx.generic_fn.owner_decl != generic_owner_decl) return false; 3615 3616 const other_comptime_args = other_key.comptime_args.?; 3617 for (other_comptime_args[0..ctx.func_ty_info.param_types.len]) |other_arg, i| { 3618 if (other_arg.ty.tag() != .generic_poison) { 3619 // anytype parameter 3620 if (!other_arg.ty.eql(ctx.comptime_tvs[i].ty)) { 3621 return false; 3622 } 3623 } 3624 if (other_arg.val.tag() != .generic_poison) { 3625 // comptime parameter 3626 if (ctx.comptime_tvs[i].val.tag() == .generic_poison) { 3627 // No match because the instantiation has a comptime parameter 3628 // but the callsite does not. 3629 return false; 3630 } 3631 if (!other_arg.val.eql(ctx.comptime_tvs[i].val, other_arg.ty)) { 3632 return false; 3633 } 3634 } 3635 } 3636 return true; 3637 } 3638 3639 /// The implementation of the hash is in semantic analysis of function calls, so 3640 /// that any errors when computing the hash can be properly reported. 
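    /// As a sketch of the expected contract (mirroring the hashing code in
    /// `analyzeCall` below, shown here only for orientation):
    ///
    ///     var hasher = std.hash.Wyhash.init(0);
    ///     std.hash.autoHash(&hasher, @ptrToInt(module_fn));
    ///     // ...then each comptime-known argument value is hashed...
    ///     const precomputed_hash = hasher.final();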
3641 pub fn hash(ctx: @This(), adapted_key: void) u64 { 3642 _ = adapted_key; 3643 return ctx.precomputed_hash; 3644 } 3645}; 3646 3647const GenericRemoveAdapter = struct { 3648 precomputed_hash: u64, 3649 3650 pub fn eql(ctx: @This(), adapted_key: *Module.Fn, other_key: *Module.Fn) bool { 3651 _ = ctx; 3652 return adapted_key == other_key; 3653 } 3654 3655 /// The implementation of the hash is in semantic analysis of function calls, so 3656 /// that any errors when computing the hash can be properly reported. 3657 pub fn hash(ctx: @This(), adapted_key: *Module.Fn) u64 { 3658 _ = adapted_key; 3659 return ctx.precomputed_hash; 3660 } 3661}; 3662 3663fn analyzeCall( 3664 sema: *Sema, 3665 block: *Block, 3666 func: Air.Inst.Ref, 3667 func_src: LazySrcLoc, 3668 call_src: LazySrcLoc, 3669 modifier: std.builtin.CallOptions.Modifier, 3670 ensure_result_used: bool, 3671 uncasted_args: []const Air.Inst.Ref, 3672) CompileError!Air.Inst.Ref { 3673 const mod = sema.mod; 3674 3675 const callee_ty = sema.typeOf(func); 3676 const func_ty = func_ty: { 3677 switch (callee_ty.zigTypeTag()) { 3678 .Fn => break :func_ty callee_ty, 3679 .Pointer => { 3680 const ptr_info = callee_ty.ptrInfo().data; 3681 if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { 3682 break :func_ty ptr_info.pointee_type; 3683 } 3684 }, 3685 else => {}, 3686 } 3687 return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty}); 3688 }; 3689 3690 const func_ty_info = func_ty.fnInfo(); 3691 const cc = func_ty_info.cc; 3692 if (cc == .Naked) { 3693 // TODO add error note: declared here 3694 return sema.fail( 3695 block, 3696 func_src, 3697 "unable to call function with naked calling convention", 3698 .{}, 3699 ); 3700 } 3701 const fn_params_len = func_ty_info.param_types.len; 3702 if (func_ty_info.is_var_args) { 3703 assert(cc == .C); 3704 if (uncasted_args.len < fn_params_len) { 3705 // TODO add error note: declared here 3706 return sema.fail( 3707 block, 3708 func_src, 3709 "expected at least {d} argument(s), found {d}", 3710 .{ fn_params_len, uncasted_args.len }, 3711 ); 3712 } 3713 } else if (fn_params_len != uncasted_args.len) { 3714 // TODO add error note: declared here 3715 return sema.fail( 3716 block, 3717 func_src, 3718 "expected {d} argument(s), found {d}", 3719 .{ fn_params_len, uncasted_args.len }, 3720 ); 3721 } 3722 3723 switch (modifier) { 3724 .auto, 3725 .always_inline, 3726 .compile_time, 3727 => {}, 3728 3729 .async_kw, 3730 .never_tail, 3731 .never_inline, 3732 .no_async, 3733 .always_tail, 3734 => return sema.fail(block, call_src, "TODO implement call with modifier {}", .{ 3735 modifier, 3736 }), 3737 } 3738 3739 const gpa = sema.gpa; 3740 3741 const is_comptime_call = block.is_comptime or modifier == .compile_time or 3742 func_ty_info.return_type.requiresComptime(); 3743 const is_inline_call = is_comptime_call or modifier == .always_inline or 3744 func_ty_info.cc == .Inline; 3745 const result: Air.Inst.Ref = if (is_inline_call) res: { 3746 const func_val = try sema.resolveConstValue(block, func_src, func); 3747 const module_fn = switch (func_val.tag()) { 3748 .decl_ref => func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data, 3749 .function => func_val.castTag(.function).?.data, 3750 .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{ 3751 @as([]const u8, if (is_comptime_call) "comptime" else "inline"), 3752 }), 3753 else => unreachable, 3754 }; 3755 3756 // Analyze the ZIR. 
The same ZIR gets analyzed into a runtime function 3757 // or an inlined call depending on what union tag the `label` field is 3758 // set to in the `Block`. 3759 // This block instruction will be used to capture the return value from the 3760 // inlined function. 3761 const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); 3762 try sema.air_instructions.append(gpa, .{ 3763 .tag = .block, 3764 .data = undefined, 3765 }); 3766 // This one is shared among sub-blocks within the same callee, but not 3767 // shared among the entire inline/comptime call stack. 3768 var inlining: Block.Inlining = .{ 3769 .comptime_result = undefined, 3770 .merges = .{ 3771 .results = .{}, 3772 .br_list = .{}, 3773 .block_inst = block_inst, 3774 }, 3775 }; 3776 // In order to save a bit of stack space, directly modify Sema rather 3777 // than create a child one. 3778 const parent_zir = sema.code; 3779 sema.code = module_fn.owner_decl.getFileScope().zir; 3780 defer sema.code = parent_zir; 3781 3782 const parent_inst_map = sema.inst_map; 3783 sema.inst_map = .{}; 3784 defer { 3785 sema.inst_map.deinit(gpa); 3786 sema.inst_map = parent_inst_map; 3787 } 3788 3789 const parent_func = sema.func; 3790 sema.func = module_fn; 3791 defer sema.func = parent_func; 3792 3793 var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, module_fn.owner_decl.src_scope); 3794 defer wip_captures.deinit(); 3795 3796 var child_block: Block = .{ 3797 .parent = null, 3798 .sema = sema, 3799 .src_decl = module_fn.owner_decl, 3800 .namespace = module_fn.owner_decl.src_namespace, 3801 .wip_capture_scope = wip_captures.scope, 3802 .instructions = .{}, 3803 .label = null, 3804 .inlining = &inlining, 3805 .is_comptime = is_comptime_call, 3806 }; 3807 3808 const merges = &child_block.inlining.?.merges; 3809 3810 defer child_block.instructions.deinit(gpa); 3811 defer merges.results.deinit(gpa); 3812 defer merges.br_list.deinit(gpa); 3813 3814 // If it's a comptime function call, we need to memoize it as long as no external 3815 // comptime memory is mutated. 3816 var memoized_call_key: Module.MemoizedCall.Key = undefined; 3817 var delete_memoized_call_key = false; 3818 defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args); 3819 if (is_comptime_call) { 3820 memoized_call_key = .{ 3821 .func = module_fn, 3822 .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len), 3823 }; 3824 delete_memoized_call_key = true; 3825 } 3826 3827 try sema.emitBackwardBranch(&child_block, call_src); 3828 3829 // This will have return instructions analyzed as break instructions to 3830 // the block_inst above. Here we are performing "comptime/inline semantic analysis" 3831 // for a function body, which means we must map the parameter ZIR instructions to 3832 // the AIR instructions of the callsite. The callee could be a generic function 3833 // which means its parameter type expressions must be resolved in order and used 3834 // to successively coerce the arguments. 3835 const fn_info = sema.code.getFnInfo(module_fn.zir_body_inst); 3836 const zir_tags = sema.code.instructions.items(.tag); 3837 var arg_i: usize = 0; 3838 for (fn_info.param_body) |inst| switch (zir_tags[inst]) { 3839 .param, .param_comptime => { 3840 // Evaluate the parameter type expression now that previous ones have 3841 // been mapped, and coerce the corresponding argument to it. 
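                // For example (illustration only):
                //
                //     fn f(comptime T: type, x: T) T { return x; }
                //     _ = f(u8, 3);
                //
                // By the time the `param` body for `x` is resolved here, `T` has
                // already been mapped to `u8`, so `3` is coerced to `u8` at this
                // callsite.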
3842 const pl_tok = sema.code.instructions.items(.data)[inst].pl_tok; 3843 const param_src = pl_tok.src(); 3844 const extra = sema.code.extraData(Zir.Inst.Param, pl_tok.payload_index); 3845 const param_body = sema.code.extra[extra.end..][0..extra.data.body_len]; 3846 const param_ty_inst = try sema.resolveBody(&child_block, param_body); 3847 const param_ty = try sema.analyzeAsType(&child_block, param_src, param_ty_inst); 3848 const arg_src = call_src; // TODO: better source location 3849 const casted_arg = try sema.coerce(&child_block, param_ty, uncasted_args[arg_i], arg_src); 3850 try sema.inst_map.putNoClobber(gpa, inst, casted_arg); 3851 3852 if (is_comptime_call) { 3853 const arg_val = try sema.resolveConstMaybeUndefVal(&child_block, arg_src, casted_arg); 3854 memoized_call_key.args[arg_i] = .{ 3855 .ty = param_ty, 3856 .val = arg_val, 3857 }; 3858 } 3859 3860 arg_i += 1; 3861 continue; 3862 }, 3863 .param_anytype, .param_anytype_comptime => { 3864 // No coercion needed. 3865 const uncasted_arg = uncasted_args[arg_i]; 3866 try sema.inst_map.putNoClobber(gpa, inst, uncasted_arg); 3867 3868 if (is_comptime_call) { 3869 const arg_src = call_src; // TODO: better source location 3870 const arg_val = try sema.resolveConstMaybeUndefVal(&child_block, arg_src, uncasted_arg); 3871 memoized_call_key.args[arg_i] = .{ 3872 .ty = sema.typeOf(uncasted_arg), 3873 .val = arg_val, 3874 }; 3875 } 3876 3877 arg_i += 1; 3878 continue; 3879 }, 3880 else => continue, 3881 }; 3882 3883 // In case it is a generic function with an expression for the return type that depends 3884 // on parameters, we must now do the same for the return type as we just did with 3885 // each of the parameters, resolving the return type and providing it to the child 3886 // `Sema` so that it can be used for the `ret_ptr` instruction. 3887 const ret_ty_inst = try sema.resolveBody(&child_block, fn_info.ret_ty_body); 3888 const ret_ty_src = func_src; // TODO better source location 3889 const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst); 3890 // If the function has an inferred error set, `bare_return_type` is the payload type only. 3891 const fn_ret_ty = blk: { 3892 // TODO instead of reusing the function's inferred error set, this code should 3893 // create a temporary error set which is used for the comptime/inline function 3894 // call alone, independent from the runtime instantiation. 3895 if (func_ty_info.return_type.castTag(.error_union)) |payload| { 3896 const error_set_ty = payload.data.error_set; 3897 break :blk try Type.Tag.error_union.create(sema.arena, .{ 3898 .error_set = error_set_ty, 3899 .payload = bare_return_type, 3900 }); 3901 } 3902 break :blk bare_return_type; 3903 }; 3904 const parent_fn_ret_ty = sema.fn_ret_ty; 3905 sema.fn_ret_ty = fn_ret_ty; 3906 defer sema.fn_ret_ty = parent_fn_ret_ty; 3907 3908 // This `res2` is here instead of directly breaking from `res` due to a stage1 3909 // bug generating invalid LLVM IR. 
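        // Memoization, as a hedged illustration (user code; `fib` is hypothetical):
        // two identical comptime calls such as
        //
        //     const a = comptime fib(20);
        //     const b = comptime fib(20);
        //
        // make the second evaluation hit `mod.memoized_calls` below and reuse the
        // stored result value instead of re-analyzing the function body.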
3910 const res2: Air.Inst.Ref = res2: { 3911 if (is_comptime_call) { 3912 if (mod.memoized_calls.get(memoized_call_key)) |result| { 3913 const ty_inst = try sema.addType(fn_ret_ty); 3914 try sema.air_values.append(gpa, result.val); 3915 sema.air_instructions.set(block_inst, .{ 3916 .tag = .constant, 3917 .data = .{ .ty_pl = .{ 3918 .ty = ty_inst, 3919 .payload = @intCast(u32, sema.air_values.items.len - 1), 3920 } }, 3921 }); 3922 break :res2 Air.indexToRef(block_inst); 3923 } 3924 } 3925 3926 const result = result: { 3927 _ = sema.analyzeBody(&child_block, fn_info.body) catch |err| switch (err) { 3928 error.ComptimeReturn => break :result inlining.comptime_result, 3929 else => |e| return e, 3930 }; 3931 break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges); 3932 }; 3933 3934 if (is_comptime_call) { 3935 const result_val = try sema.resolveConstMaybeUndefVal(block, call_src, result); 3936 3937 // TODO: check whether any external comptime memory was mutated by the 3938 // comptime function call. If so, then do not memoize the call here. 3939 // TODO: re-evaluate whether memoized_calls needs its own arena. I think 3940 // it should be fine to use the Decl arena for the function. 3941 { 3942 var arena_allocator = std.heap.ArenaAllocator.init(gpa); 3943 errdefer arena_allocator.deinit(); 3944 const arena = arena_allocator.allocator(); 3945 3946 for (memoized_call_key.args) |*arg| { 3947 arg.* = try arg.*.copy(arena); 3948 } 3949 3950 try mod.memoized_calls.put(gpa, memoized_call_key, .{ 3951 .val = try result_val.copy(arena), 3952 .arena = arena_allocator.state, 3953 }); 3954 delete_memoized_call_key = false; 3955 } 3956 } 3957 3958 break :res2 result; 3959 }; 3960 3961 try wip_captures.finalize(); 3962 3963 break :res res2; 3964 } else if (func_ty_info.is_generic) res: { 3965 const func_val = try sema.resolveConstValue(block, func_src, func); 3966 const module_fn = switch (func_val.tag()) { 3967 .function => func_val.castTag(.function).?.data, 3968 .decl_ref => func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data, 3969 else => unreachable, 3970 }; 3971 // Check the Module's generic function map with an adapted context, so that we 3972 // can match against `uncasted_args` rather than doing the work below to create a 3973 // generic Scope only to junk it if it matches an existing instantiation. 3974 const namespace = module_fn.owner_decl.src_namespace; 3975 const fn_zir = namespace.file_scope.zir; 3976 const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); 3977 const zir_tags = fn_zir.instructions.items(.tag); 3978 3979 // This hash must match `Module.MonomorphedFuncsContext.hash`. 3980 // For parameters explicitly marked comptime and simple parameter type expressions, 3981 // we know whether a parameter is elided from a monomorphed function, and can 3982 // use it in the hash here. However, for parameter type expressions that are not 3983 // explicitly marked comptime and rely on previous parameter comptime values, we 3984 // don't find out until after generating a monomorphed function whether the parameter 3985 // type ended up being a "must-be-comptime-known" type. 
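        // For example (illustration only):
        //
        //     fn eql(comptime T: type, a: T, b: T) bool { return a == b; }
        //
        // `T` is explicitly comptime, so its value participates in the hash below,
        // while `a` and `b` remain runtime parameters unless `T` itself turns out to
        // require comptime (e.g. `T == type`), which is only known after the
        // monomorphized function has been generated.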
3986 var hasher = std.hash.Wyhash.init(0); 3987 std.hash.autoHash(&hasher, @ptrToInt(module_fn)); 3988 3989 const comptime_tvs = try sema.arena.alloc(TypedValue, func_ty_info.param_types.len); 3990 3991 for (func_ty_info.param_types) |param_ty, i| { 3992 const is_comptime = func_ty_info.paramIsComptime(i); 3993 if (is_comptime) { 3994 const arg_src = call_src; // TODO better source location 3995 const casted_arg = try sema.coerce(block, param_ty, uncasted_args[i], arg_src); 3996 if (try sema.resolveMaybeUndefVal(block, arg_src, casted_arg)) |arg_val| { 3997 if (param_ty.tag() != .generic_poison) { 3998 arg_val.hash(param_ty, &hasher); 3999 } 4000 comptime_tvs[i] = .{ 4001 // This will be different than `param_ty` in the case of `generic_poison`. 4002 .ty = sema.typeOf(casted_arg), 4003 .val = arg_val, 4004 }; 4005 } else { 4006 return sema.failWithNeededComptime(block, arg_src); 4007 } 4008 } else { 4009 comptime_tvs[i] = .{ 4010 .ty = sema.typeOf(uncasted_args[i]), 4011 .val = Value.initTag(.generic_poison), 4012 }; 4013 } 4014 } 4015 4016 const precomputed_hash = hasher.final(); 4017 4018 const adapter: GenericCallAdapter = .{ 4019 .generic_fn = module_fn, 4020 .precomputed_hash = precomputed_hash, 4021 .func_ty_info = func_ty_info, 4022 .comptime_tvs = comptime_tvs, 4023 }; 4024 const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter); 4025 if (gop.found_existing) { 4026 const callee_func = gop.key_ptr.*; 4027 break :res try sema.finishGenericCall( 4028 block, 4029 call_src, 4030 callee_func, 4031 func_src, 4032 uncasted_args, 4033 fn_info, 4034 zir_tags, 4035 ); 4036 } 4037 const new_module_func = try gpa.create(Module.Fn); 4038 gop.key_ptr.* = new_module_func; 4039 { 4040 errdefer gpa.destroy(new_module_func); 4041 const remove_adapter: GenericRemoveAdapter = .{ 4042 .precomputed_hash = precomputed_hash, 4043 }; 4044 errdefer assert(mod.monomorphed_funcs.removeAdapted(new_module_func, remove_adapter)); 4045 4046 try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); 4047 4048 // Create a Decl for the new function. 4049 const src_decl = namespace.getDecl(); 4050 // TODO better names for generic function instantiations 4051 const name_index = mod.getNextAnonNameIndex(); 4052 const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{ 4053 module_fn.owner_decl.name, name_index, 4054 }); 4055 const new_decl = try mod.allocateNewDecl(decl_name, namespace, module_fn.owner_decl.src_node, src_decl.src_scope); 4056 new_decl.src_line = module_fn.owner_decl.src_line; 4057 new_decl.is_pub = module_fn.owner_decl.is_pub; 4058 new_decl.is_exported = module_fn.owner_decl.is_exported; 4059 new_decl.has_align = module_fn.owner_decl.has_align; 4060 new_decl.has_linksection_or_addrspace = module_fn.owner_decl.has_linksection_or_addrspace; 4061 new_decl.@"addrspace" = module_fn.owner_decl.@"addrspace"; 4062 new_decl.zir_decl_index = module_fn.owner_decl.zir_decl_index; 4063 new_decl.alive = true; // This Decl is called at runtime. 4064 new_decl.has_tv = true; 4065 new_decl.owns_tv = true; 4066 new_decl.analysis = .in_progress; 4067 new_decl.generation = mod.generation; 4068 4069 namespace.anon_decls.putAssumeCapacityNoClobber(new_decl, {}); 4070 4071 // The generic function Decl is guaranteed to be the first dependency 4072 // of each of its instantiations. 
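            // (This invariant is what `GenericCallAdapter.eql` above relies on when it
            // reads `other_key.owner_decl.dependencies.keys()[0]`.)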
4073 assert(new_decl.dependencies.keys().len == 0); 4074 try mod.declareDeclDependency(new_decl, module_fn.owner_decl); 4075 4076 var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); 4077 errdefer new_decl_arena.deinit(); 4078 const new_decl_arena_allocator = new_decl_arena.allocator(); 4079 4080 // Re-run the block that creates the function, with the comptime parameters 4081 // pre-populated inside `inst_map`. This causes `param_comptime` and 4082 // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a 4083 // new, monomorphized function, with the comptime parameters elided. 4084 var child_sema: Sema = .{ 4085 .mod = mod, 4086 .gpa = gpa, 4087 .arena = sema.arena, 4088 .perm_arena = new_decl_arena_allocator, 4089 .code = fn_zir, 4090 .owner_decl = new_decl, 4091 .func = null, 4092 .fn_ret_ty = Type.void, 4093 .owner_func = null, 4094 .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len), 4095 .comptime_args_fn_inst = module_fn.zir_body_inst, 4096 .preallocated_new_func = new_module_func, 4097 }; 4098 defer child_sema.deinit(); 4099 4100 var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); 4101 defer wip_captures.deinit(); 4102 4103 var child_block: Block = .{ 4104 .parent = null, 4105 .sema = &child_sema, 4106 .src_decl = new_decl, 4107 .namespace = namespace, 4108 .wip_capture_scope = wip_captures.scope, 4109 .instructions = .{}, 4110 .inlining = null, 4111 .is_comptime = true, 4112 }; 4113 defer { 4114 child_block.instructions.deinit(gpa); 4115 child_block.params.deinit(gpa); 4116 } 4117 4118 try child_sema.inst_map.ensureUnusedCapacity(gpa, @intCast(u32, uncasted_args.len)); 4119 var arg_i: usize = 0; 4120 for (fn_info.param_body) |inst| { 4121 var is_comptime = false; 4122 var is_anytype = false; 4123 switch (zir_tags[inst]) { 4124 .param => { 4125 is_comptime = func_ty_info.paramIsComptime(arg_i); 4126 }, 4127 .param_comptime => { 4128 is_comptime = true; 4129 }, 4130 .param_anytype => { 4131 is_anytype = true; 4132 is_comptime = func_ty_info.paramIsComptime(arg_i); 4133 }, 4134 .param_anytype_comptime => { 4135 is_anytype = true; 4136 is_comptime = true; 4137 }, 4138 else => continue, 4139 } 4140 const arg_src = call_src; // TODO: better source location 4141 const arg = uncasted_args[arg_i]; 4142 if (is_comptime) { 4143 if (try sema.resolveMaybeUndefVal(block, arg_src, arg)) |arg_val| { 4144 const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val); 4145 child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg); 4146 } else { 4147 return sema.failWithNeededComptime(block, arg_src); 4148 } 4149 } else if (is_anytype) { 4150 // We insert into the map an instruction which is runtime-known 4151 // but has the type of the argument. 4152 const child_arg = try child_block.addArg(sema.typeOf(arg), 0); 4153 child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg); 4154 } 4155 arg_i += 1; 4156 } 4157 const new_func_inst = child_sema.resolveBody(&child_block, fn_info.param_body) catch |err| { 4158 // TODO look up the compile error that happened here and attach a note to it 4159 // pointing here, at the generic instantiation callsite. 
4160 if (sema.owner_func) |owner_func| { 4161 owner_func.state = .dependency_failure; 4162 } else { 4163 sema.owner_decl.analysis = .dependency_failure; 4164 } 4165 return err; 4166 }; 4167 const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst) catch unreachable; 4168 const new_func = new_func_val.castTag(.function).?.data; 4169 assert(new_func == new_module_func); 4170 4171 arg_i = 0; 4172 for (fn_info.param_body) |inst| { 4173 switch (zir_tags[inst]) { 4174 .param_comptime, .param_anytype_comptime, .param, .param_anytype => {}, 4175 else => continue, 4176 } 4177 const arg = child_sema.inst_map.get(inst).?; 4178 const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator); 4179 if (child_sema.resolveMaybeUndefValAllowVariables( 4180 &child_block, 4181 .unneeded, 4182 arg, 4183 ) catch unreachable) |arg_val| { 4184 child_sema.comptime_args[arg_i] = .{ 4185 .ty = copied_arg_ty, 4186 .val = try arg_val.copy(new_decl_arena_allocator), 4187 }; 4188 } else { 4189 child_sema.comptime_args[arg_i] = .{ 4190 .ty = copied_arg_ty, 4191 .val = Value.initTag(.generic_poison), 4192 }; 4193 } 4194 4195 arg_i += 1; 4196 } 4197 4198 try wip_captures.finalize(); 4199 4200 // Populate the Decl ty/val with the function and its type. 4201 new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator); 4202 new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func); 4203 new_decl.analysis = .complete; 4204 4205 log.debug("generic function '{s}' instantiated with type {}", .{ 4206 new_decl.name, new_decl.ty, 4207 }); 4208 assert(!new_decl.ty.fnInfo().is_generic); 4209 4210 // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field 4211 // will be populated, ensuring it will have `analyzeBody` called with the ZIR 4212 // parameters mapped appropriately. 
4213 try mod.comp.bin_file.allocateDeclIndexes(new_decl); 4214 try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func }); 4215 4216 try new_decl.finalizeNewArena(&new_decl_arena); 4217 } 4218 4219 break :res try sema.finishGenericCall( 4220 block, 4221 call_src, 4222 new_module_func, 4223 func_src, 4224 uncasted_args, 4225 fn_info, 4226 zir_tags, 4227 ); 4228 } else res: { 4229 try sema.requireRuntimeBlock(block, call_src); 4230 4231 const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len); 4232 for (uncasted_args) |uncasted_arg, i| { 4233 const arg_src = call_src; // TODO: better source location 4234 if (i < fn_params_len) { 4235 const param_ty = func_ty.fnParamType(i); 4236 try sema.resolveTypeLayout(block, arg_src, param_ty); 4237 args[i] = try sema.coerce(block, param_ty, uncasted_arg, arg_src); 4238 } else { 4239 args[i] = uncasted_arg; 4240 } 4241 } 4242 4243 try sema.resolveTypeLayout(block, call_src, func_ty_info.return_type); 4244 4245 try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len + 4246 args.len); 4247 const func_inst = try block.addInst(.{ 4248 .tag = .call, 4249 .data = .{ .pl_op = .{ 4250 .operand = func, 4251 .payload = sema.addExtraAssumeCapacity(Air.Call{ 4252 .args_len = @intCast(u32, args.len), 4253 }), 4254 } }, 4255 }); 4256 sema.appendRefsAssumeCapacity(args); 4257 break :res func_inst; 4258 }; 4259 4260 if (ensure_result_used) { 4261 try sema.ensureResultUsed(block, result, call_src); 4262 } 4263 return result; 4264} 4265 4266fn finishGenericCall( 4267 sema: *Sema, 4268 block: *Block, 4269 call_src: LazySrcLoc, 4270 callee: *Module.Fn, 4271 func_src: LazySrcLoc, 4272 uncasted_args: []const Air.Inst.Ref, 4273 fn_info: Zir.FnInfo, 4274 zir_tags: []const Zir.Inst.Tag, 4275) CompileError!Air.Inst.Ref { 4276 const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl); 4277 4278 // Make a runtime call to the new function, making sure to omit the comptime args. 
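    // For example (illustration only): for `fn f(comptime T: type, x: T) T` called as
    // `f(u32, x)`, the monomorphized function takes a single runtime parameter, so only
    // the coerced `x` lands in `runtime_args`; the `u32` argument was elided at
    // instantiation time.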
4279 try sema.requireRuntimeBlock(block, call_src); 4280 4281 const comptime_args = callee.comptime_args.?; 4282 const runtime_args_len = count: { 4283 var count: u32 = 0; 4284 var arg_i: usize = 0; 4285 for (fn_info.param_body) |inst| { 4286 switch (zir_tags[inst]) { 4287 .param_comptime, .param_anytype_comptime, .param, .param_anytype => { 4288 if (comptime_args[arg_i].val.tag() == .generic_poison) { 4289 count += 1; 4290 } 4291 arg_i += 1; 4292 }, 4293 else => continue, 4294 } 4295 } 4296 break :count count; 4297 }; 4298 const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); 4299 { 4300 const new_fn_ty = callee.owner_decl.ty; 4301 var runtime_i: u32 = 0; 4302 var total_i: u32 = 0; 4303 for (fn_info.param_body) |inst| { 4304 switch (zir_tags[inst]) { 4305 .param_comptime, .param_anytype_comptime, .param, .param_anytype => {}, 4306 else => continue, 4307 } 4308 const is_runtime = comptime_args[total_i].val.tag() == .generic_poison; 4309 if (is_runtime) { 4310 const param_ty = new_fn_ty.fnParamType(runtime_i); 4311 const arg_src = call_src; // TODO: better source location 4312 const uncasted_arg = uncasted_args[total_i]; 4313 try sema.resolveTypeLayout(block, arg_src, param_ty); 4314 const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src); 4315 runtime_args[runtime_i] = casted_arg; 4316 runtime_i += 1; 4317 } 4318 total_i += 1; 4319 } 4320 4321 try sema.resolveTypeLayout(block, call_src, new_fn_ty.fnReturnType()); 4322 } 4323 try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + 4324 runtime_args_len); 4325 const func_inst = try block.addInst(.{ 4326 .tag = .call, 4327 .data = .{ .pl_op = .{ 4328 .operand = callee_inst, 4329 .payload = sema.addExtraAssumeCapacity(Air.Call{ 4330 .args_len = runtime_args_len, 4331 }), 4332 } }, 4333 }); 4334 sema.appendRefsAssumeCapacity(runtime_args); 4335 return func_inst; 4336} 4337 4338fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4339 _ = block; 4340 const tracy = trace(@src()); 4341 defer tracy.end(); 4342 4343 const int_type = sema.code.instructions.items(.data)[inst].int_type; 4344 const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count); 4345 4346 return sema.addType(ty); 4347} 4348 4349fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4350 const tracy = trace(@src()); 4351 defer tracy.end(); 4352 4353 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4354 const src = inst_data.src(); 4355 const child_type = try sema.resolveType(block, src, inst_data.operand); 4356 const opt_type = try Type.optional(sema.arena, child_type); 4357 4358 return sema.addType(opt_type); 4359} 4360 4361fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4362 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4363 const src = inst_data.src(); 4364 const array_type = try sema.resolveType(block, src, inst_data.operand); 4365 const elem_type = array_type.elemType(); 4366 return sema.addType(elem_type); 4367} 4368 4369fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4370 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 4371 const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 4372 const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 4373 const extra = 
sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 4374 const len = try sema.resolveAlreadyCoercedInt(block, len_src, extra.lhs, u32); 4375 const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); 4376 const vector_type = try Type.Tag.vector.create(sema.arena, .{ 4377 .len = len, 4378 .elem_type = elem_type, 4379 }); 4380 return sema.addType(vector_type); 4381} 4382 4383fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4384 const tracy = trace(@src()); 4385 defer tracy.end(); 4386 4387 const bin_inst = sema.code.instructions.items(.data)[inst].bin; 4388 const len = try sema.resolveInt(block, .unneeded, bin_inst.lhs, Type.usize); 4389 const elem_type = try sema.resolveType(block, .unneeded, bin_inst.rhs); 4390 const array_ty = try Type.array(sema.arena, len, null, elem_type); 4391 4392 return sema.addType(array_ty); 4393} 4394 4395fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4396 const tracy = trace(@src()); 4397 defer tracy.end(); 4398 4399 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 4400 const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data; 4401 const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node }; 4402 const sentinel_src: LazySrcLoc = .{ .node_offset_array_type_sentinel = inst_data.src_node }; 4403 const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node }; 4404 const len = try sema.resolveInt(block, len_src, extra.len, Type.usize); 4405 const elem_type = try sema.resolveType(block, elem_src, extra.elem_type); 4406 const uncasted_sentinel = sema.resolveInst(extra.sentinel); 4407 const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, sentinel_src); 4408 const sentinel_val = try sema.resolveConstValue(block, sentinel_src, sentinel); 4409 const array_ty = try Type.array(sema.arena, len, sentinel_val, elem_type); 4410 4411 return sema.addType(array_ty); 4412} 4413 4414fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4415 const tracy = trace(@src()); 4416 defer tracy.end(); 4417 4418 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4419 const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node }; 4420 const return_type = try sema.resolveType(block, operand_src, inst_data.operand); 4421 const anyframe_type = try Type.Tag.anyframe_T.create(sema.arena, return_type); 4422 4423 return sema.addType(anyframe_type); 4424} 4425 4426fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4427 const tracy = trace(@src()); 4428 defer tracy.end(); 4429 4430 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 4431 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 4432 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 4433 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 4434 const error_union = try sema.resolveType(block, lhs_src, extra.lhs); 4435 const payload = try sema.resolveType(block, rhs_src, extra.rhs); 4436 4437 if (error_union.zigTypeTag() != .ErrorSet) { 4438 return sema.fail(block, lhs_src, "expected error set type, found {}", .{ 4439 error_union.elemType(), 4440 }); 4441 } 4442 const err_union_ty = try Module.errorUnionType(sema.arena, error_union, payload); 4443 return sema.addType(err_union_ty); 4444} 
4445 4446fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4447 _ = block; 4448 const tracy = trace(@src()); 4449 defer tracy.end(); 4450 4451 const inst_data = sema.code.instructions.items(.data)[inst].str_tok; 4452 4453 // Create an anonymous error set type with only this error value, and return the value. 4454 const kv = try sema.mod.getErrorValue(inst_data.get(sema.code)); 4455 const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key); 4456 return sema.addConstant( 4457 result_type, 4458 try Value.Tag.@"error".create(sema.arena, .{ 4459 .name = kv.key, 4460 }), 4461 ); 4462} 4463 4464fn zirErrorToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4465 const tracy = trace(@src()); 4466 defer tracy.end(); 4467 4468 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4469 const src = inst_data.src(); 4470 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 4471 const op = sema.resolveInst(inst_data.operand); 4472 const op_coerced = try sema.coerce(block, Type.anyerror, op, operand_src); 4473 const result_ty = Type.initTag(.u16); 4474 4475 if (try sema.resolveMaybeUndefVal(block, src, op_coerced)) |val| { 4476 if (val.isUndef()) { 4477 return sema.addConstUndef(result_ty); 4478 } 4479 const payload = try sema.arena.create(Value.Payload.U64); 4480 payload.* = .{ 4481 .base = .{ .tag = .int_u64 }, 4482 .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, 4483 }; 4484 return sema.addConstant(result_ty, Value.initPayload(&payload.base)); 4485 } 4486 4487 try sema.requireRuntimeBlock(block, src); 4488 return block.addBitCast(result_ty, op_coerced); 4489} 4490 4491fn zirIntToError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4492 const tracy = trace(@src()); 4493 defer tracy.end(); 4494 4495 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4496 const src = inst_data.src(); 4497 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 4498 4499 const op = sema.resolveInst(inst_data.operand); 4500 4501 if (try sema.resolveDefinedValue(block, operand_src, op)) |value| { 4502 const int = value.toUnsignedInt(); 4503 if (int > sema.mod.global_error_set.count() or int == 0) 4504 return sema.fail(block, operand_src, "integer value {d} represents no error", .{int}); 4505 const payload = try sema.arena.create(Value.Payload.Error); 4506 payload.* = .{ 4507 .base = .{ .tag = .@"error" }, 4508 .data = .{ .name = sema.mod.error_name_list.items[@intCast(usize, int)] }, 4509 }; 4510 return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base)); 4511 } 4512 try sema.requireRuntimeBlock(block, src); 4513 if (block.wantSafety()) { 4514 return sema.fail(block, src, "TODO: get max errors in compilation", .{}); 4515 // const is_gt_max = @panic("TODO get max errors in compilation"); 4516 // try sema.addSafetyCheck(block, is_gt_max, .invalid_error_code); 4517 } 4518 return block.addInst(.{ 4519 .tag = .bitcast, 4520 .data = .{ .ty_op = .{ 4521 .ty = Air.Inst.Ref.anyerror_type, 4522 .operand = op, 4523 } }, 4524 }); 4525} 4526 4527fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4528 const tracy = trace(@src()); 4529 defer tracy.end(); 4530 4531 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 4532 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 4533 
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; 4534 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 4535 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 4536 const lhs = sema.resolveInst(extra.lhs); 4537 const rhs = sema.resolveInst(extra.rhs); 4538 if (sema.typeOf(lhs).zigTypeTag() == .Bool and sema.typeOf(rhs).zigTypeTag() == .Bool) { 4539 const msg = msg: { 4540 const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{}); 4541 errdefer msg.destroy(sema.gpa); 4542 try sema.errNote(block, src, msg, "'||' merges error sets; 'or' performs boolean OR", .{}); 4543 break :msg msg; 4544 }; 4545 return sema.failWithOwnedErrorMsg(msg); 4546 } 4547 const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); 4548 const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); 4549 if (lhs_ty.zigTypeTag() != .ErrorSet) 4550 return sema.fail(block, lhs_src, "expected error set type, found {}", .{lhs_ty}); 4551 if (rhs_ty.zigTypeTag() != .ErrorSet) 4552 return sema.fail(block, rhs_src, "expected error set type, found {}", .{rhs_ty}); 4553 4554 // Anything merged with anyerror is anyerror. 4555 if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { 4556 return Air.Inst.Ref.anyerror_type; 4557 } 4558 // Resolve both error sets now. 4559 var set: std.StringHashMapUnmanaged(void) = .{}; 4560 defer set.deinit(sema.gpa); 4561 4562 switch (lhs_ty.tag()) { 4563 .error_set_single => { 4564 const name = lhs_ty.castTag(.error_set_single).?.data; 4565 try set.put(sema.gpa, name, {}); 4566 }, 4567 .error_set_merged => { 4568 const names = lhs_ty.castTag(.error_set_merged).?.data; 4569 for (names) |name| { 4570 try set.put(sema.gpa, name, {}); 4571 } 4572 }, 4573 .error_set => { 4574 const lhs_set = lhs_ty.castTag(.error_set).?.data; 4575 try set.ensureUnusedCapacity(sema.gpa, lhs_set.names_len); 4576 for (lhs_set.names_ptr[0..lhs_set.names_len]) |name| { 4577 set.putAssumeCapacityNoClobber(name, {}); 4578 } 4579 }, 4580 else => unreachable, 4581 } 4582 switch (rhs_ty.tag()) { 4583 .error_set_single => { 4584 const name = rhs_ty.castTag(.error_set_single).?.data; 4585 try set.put(sema.gpa, name, {}); 4586 }, 4587 .error_set_merged => { 4588 const names = rhs_ty.castTag(.error_set_merged).?.data; 4589 for (names) |name| { 4590 try set.put(sema.gpa, name, {}); 4591 } 4592 }, 4593 .error_set => { 4594 const rhs_set = rhs_ty.castTag(.error_set).?.data; 4595 try set.ensureUnusedCapacity(sema.gpa, rhs_set.names_len); 4596 for (rhs_set.names_ptr[0..rhs_set.names_len]) |name| { 4597 set.putAssumeCapacity(name, {}); 4598 } 4599 }, 4600 else => unreachable, 4601 } 4602 4603 // TODO do we really want to create a Decl for this? 4604 // The reason we do it right now is for memory management. 
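    // As a hedged illustration (user code, not part of this file):
    //
    //     const E = error{NotFound} || error{OutOfMemory};
    //
    // reaches this point with both names collected in `set`, and the anonymous Decl
    // created below owns the memory of the resulting merged set
    // `error{NotFound,OutOfMemory}`.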
4605 var anon_decl = try block.startAnonDecl(); 4606 defer anon_decl.deinit(); 4607 4608 const new_names = try anon_decl.arena().alloc([]const u8, set.count()); 4609 var it = set.keyIterator(); 4610 var i: usize = 0; 4611 while (it.next()) |key| : (i += 1) { 4612 new_names[i] = key.*; 4613 } 4614 4615 const err_set_ty = try Type.Tag.error_set_merged.create(anon_decl.arena(), new_names); 4616 const err_set_decl = try anon_decl.finish( 4617 Type.type, 4618 try Value.Tag.ty.create(anon_decl.arena(), err_set_ty), 4619 ); 4620 try sema.mod.declareDeclDependency(sema.owner_decl, err_set_decl); 4621 return sema.addType(err_set_ty); 4622} 4623 4624fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4625 _ = block; 4626 const tracy = trace(@src()); 4627 defer tracy.end(); 4628 4629 const inst_data = sema.code.instructions.items(.data)[inst].str_tok; 4630 const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); 4631 return sema.addConstant( 4632 Type.initTag(.enum_literal), 4633 try Value.Tag.enum_literal.create(sema.arena, duped_name), 4634 ); 4635} 4636 4637fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4638 const arena = sema.arena; 4639 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4640 const src = inst_data.src(); 4641 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 4642 const operand = sema.resolveInst(inst_data.operand); 4643 const operand_ty = sema.typeOf(operand); 4644 4645 const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { 4646 .Enum => operand, 4647 .Union => { 4648 //if (!operand_ty.unionHasTag()) { 4649 // return sema.fail( 4650 // block, 4651 // operand_src, 4652 // "untagged union '{}' cannot be converted to integer", 4653 // .{dest_ty_src}, 4654 // ); 4655 //} 4656 return sema.fail(block, operand_src, "TODO zirEnumToInt for tagged unions", .{}); 4657 }, 4658 else => { 4659 return sema.fail(block, operand_src, "expected enum or tagged union, found {}", .{ 4660 operand_ty, 4661 }); 4662 }, 4663 }; 4664 const enum_tag_ty = sema.typeOf(enum_tag); 4665 4666 var int_tag_type_buffer: Type.Payload.Bits = undefined; 4667 const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); 4668 4669 if (try sema.typeHasOnePossibleValue(block, src, enum_tag_ty)) |opv| { 4670 return sema.addConstant(int_tag_ty, opv); 4671 } 4672 4673 if (try sema.resolveMaybeUndefVal(block, operand_src, enum_tag)) |enum_tag_val| { 4674 var buffer: Value.Payload.U64 = undefined; 4675 const val = enum_tag_val.enumToInt(enum_tag_ty, &buffer); 4676 return sema.addConstant(int_tag_ty, try val.copy(sema.arena)); 4677 } 4678 4679 try sema.requireRuntimeBlock(block, src); 4680 return block.addBitCast(int_tag_ty, enum_tag); 4681} 4682 4683fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4684 const target = sema.mod.getTarget(); 4685 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 4686 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 4687 const src = inst_data.src(); 4688 const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 4689 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 4690 const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); 4691 const operand = sema.resolveInst(extra.rhs); 4692 4693 if (dest_ty.zigTypeTag() != .Enum) { 4694 return 
sema.fail(block, dest_ty_src, "expected enum, found {}", .{dest_ty}); 4695 } 4696 4697 if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |int_val| { 4698 if (dest_ty.isNonexhaustiveEnum()) { 4699 return sema.addConstant(dest_ty, int_val); 4700 } 4701 if (int_val.isUndef()) { 4702 return sema.failWithUseOfUndef(block, operand_src); 4703 } 4704 if (!dest_ty.enumHasInt(int_val, target)) { 4705 const msg = msg: { 4706 const msg = try sema.errMsg( 4707 block, 4708 src, 4709 "enum '{}' has no tag with value {}", 4710 .{ dest_ty, int_val }, 4711 ); 4712 errdefer msg.destroy(sema.gpa); 4713 try sema.mod.errNoteNonLazy( 4714 dest_ty.declSrcLoc(), 4715 msg, 4716 "enum declared here", 4717 .{}, 4718 ); 4719 break :msg msg; 4720 }; 4721 return sema.failWithOwnedErrorMsg(msg); 4722 } 4723 return sema.addConstant(dest_ty, int_val); 4724 } 4725 4726 try sema.requireRuntimeBlock(block, src); 4727 // TODO insert safety check to make sure the value matches an enum value 4728 return block.addTyOp(.intcast, dest_ty, operand); 4729} 4730 4731/// Pointer in, pointer out. 4732fn zirOptionalPayloadPtr( 4733 sema: *Sema, 4734 block: *Block, 4735 inst: Zir.Inst.Index, 4736 safety_check: bool, 4737) CompileError!Air.Inst.Ref { 4738 const tracy = trace(@src()); 4739 defer tracy.end(); 4740 4741 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4742 const optional_ptr = sema.resolveInst(inst_data.operand); 4743 const optional_ptr_ty = sema.typeOf(optional_ptr); 4744 assert(optional_ptr_ty.zigTypeTag() == .Pointer); 4745 const src = inst_data.src(); 4746 4747 const opt_type = optional_ptr_ty.elemType(); 4748 if (opt_type.zigTypeTag() != .Optional) { 4749 return sema.fail(block, src, "expected optional type, found {}", .{opt_type}); 4750 } 4751 4752 const child_type = try opt_type.optionalChildAlloc(sema.arena); 4753 const child_pointer = try Type.ptr(sema.arena, .{ 4754 .pointee_type = child_type, 4755 .mutable = !optional_ptr_ty.isConstPtr(), 4756 .@"addrspace" = optional_ptr_ty.ptrAddressSpace(), 4757 }); 4758 4759 if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| { 4760 if (try sema.pointerDeref(block, src, pointer_val, optional_ptr_ty)) |val| { 4761 if (val.isNull()) { 4762 return sema.fail(block, src, "unable to unwrap null", .{}); 4763 } 4764 // The same Value represents the pointer to the optional and the payload. 4765 return sema.addConstant( 4766 child_pointer, 4767 try Value.Tag.opt_payload_ptr.create(sema.arena, pointer_val), 4768 ); 4769 } 4770 } 4771 4772 try sema.requireRuntimeBlock(block, src); 4773 if (safety_check and block.wantSafety()) { 4774 const is_non_null = try block.addUnOp(.is_non_null_ptr, optional_ptr); 4775 try sema.addSafetyCheck(block, is_non_null, .unwrap_null); 4776 } 4777 return block.addTyOp(.optional_payload_ptr, child_pointer, optional_ptr); 4778} 4779 4780/// Value in, value out. 
4781fn zirOptionalPayload( 4782 sema: *Sema, 4783 block: *Block, 4784 inst: Zir.Inst.Index, 4785 safety_check: bool, 4786) CompileError!Air.Inst.Ref { 4787 const tracy = trace(@src()); 4788 defer tracy.end(); 4789 4790 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4791 const src = inst_data.src(); 4792 const operand = sema.resolveInst(inst_data.operand); 4793 const operand_ty = sema.typeOf(operand); 4794 const result_ty = switch (operand_ty.zigTypeTag()) { 4795 .Optional => try operand_ty.optionalChildAlloc(sema.arena), 4796 .Pointer => t: { 4797 if (operand_ty.ptrSize() != .C) { 4798 return sema.failWithExpectedOptionalType(block, src, operand_ty); 4799 } 4800 const ptr_info = operand_ty.ptrInfo().data; 4801 break :t try Type.ptr(sema.arena, .{ 4802 .pointee_type = try ptr_info.pointee_type.copy(sema.arena), 4803 .@"align" = ptr_info.@"align", 4804 .@"addrspace" = ptr_info.@"addrspace", 4805 .mutable = ptr_info.mutable, 4806 .@"allowzero" = ptr_info.@"allowzero", 4807 .@"volatile" = ptr_info.@"volatile", 4808 .size = .One, 4809 }); 4810 }, 4811 else => return sema.failWithExpectedOptionalType(block, src, operand_ty), 4812 }; 4813 4814 if (try sema.resolveDefinedValue(block, src, operand)) |val| { 4815 if (val.isNull()) { 4816 return sema.fail(block, src, "unable to unwrap null", .{}); 4817 } 4818 if (val.castTag(.opt_payload)) |payload| { 4819 return sema.addConstant(result_ty, payload.data); 4820 } 4821 return sema.addConstant(result_ty, val); 4822 } 4823 4824 try sema.requireRuntimeBlock(block, src); 4825 if (safety_check and block.wantSafety()) { 4826 const is_non_null = try block.addUnOp(.is_non_null, operand); 4827 try sema.addSafetyCheck(block, is_non_null, .unwrap_null); 4828 } 4829 return block.addTyOp(.optional_payload, result_ty, operand); 4830} 4831 4832/// Value in, value out 4833fn zirErrUnionPayload( 4834 sema: *Sema, 4835 block: *Block, 4836 inst: Zir.Inst.Index, 4837 safety_check: bool, 4838) CompileError!Air.Inst.Ref { 4839 const tracy = trace(@src()); 4840 defer tracy.end(); 4841 4842 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4843 const src = inst_data.src(); 4844 const operand = sema.resolveInst(inst_data.operand); 4845 const operand_src = src; 4846 const operand_ty = sema.typeOf(operand); 4847 if (operand_ty.zigTypeTag() != .ErrorUnion) 4848 return sema.fail(block, operand_src, "expected error union type, found '{}'", .{operand_ty}); 4849 4850 if (try sema.resolveDefinedValue(block, src, operand)) |val| { 4851 if (val.getError()) |name| { 4852 return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); 4853 } 4854 const data = val.castTag(.eu_payload).?.data; 4855 const result_ty = operand_ty.errorUnionPayload(); 4856 return sema.addConstant(result_ty, data); 4857 } 4858 try sema.requireRuntimeBlock(block, src); 4859 if (safety_check and block.wantSafety()) { 4860 const is_non_err = try block.addUnOp(.is_non_err, operand); 4861 try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); 4862 } 4863 const result_ty = operand_ty.errorUnionPayload(); 4864 return block.addTyOp(.unwrap_errunion_payload, result_ty, operand); 4865} 4866 4867/// Pointer in, pointer out.
4868fn zirErrUnionPayloadPtr( 4869 sema: *Sema, 4870 block: *Block, 4871 inst: Zir.Inst.Index, 4872 safety_check: bool, 4873) CompileError!Air.Inst.Ref { 4874 const tracy = trace(@src()); 4875 defer tracy.end(); 4876 4877 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4878 const src = inst_data.src(); 4879 const operand = sema.resolveInst(inst_data.operand); 4880 const operand_ty = sema.typeOf(operand); 4881 assert(operand_ty.zigTypeTag() == .Pointer); 4882 4883 if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) 4884 return sema.fail(block, src, "expected error union type, found {}", .{operand_ty.elemType()}); 4885 4886 const payload_ty = operand_ty.elemType().errorUnionPayload(); 4887 const operand_pointer_ty = try Type.ptr(sema.arena, .{ 4888 .pointee_type = payload_ty, 4889 .mutable = !operand_ty.isConstPtr(), 4890 .@"addrspace" = operand_ty.ptrAddressSpace(), 4891 }); 4892 4893 if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { 4894 if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { 4895 if (val.getError()) |name| { 4896 return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); 4897 } 4898 return sema.addConstant( 4899 operand_pointer_ty, 4900 try Value.Tag.eu_payload_ptr.create(sema.arena, pointer_val), 4901 ); 4902 } 4903 } 4904 4905 try sema.requireRuntimeBlock(block, src); 4906 if (safety_check and block.wantSafety()) { 4907 const is_non_err = try block.addUnOp(.is_non_err_ptr, operand); 4908 try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); 4909 } 4910 return block.addTyOp(.unwrap_errunion_payload_ptr, operand_pointer_ty, operand); 4911} 4912 4913/// Value in, value out 4914fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4915 const tracy = trace(@src()); 4916 defer tracy.end(); 4917 4918 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4919 const src = inst_data.src(); 4920 const operand = sema.resolveInst(inst_data.operand); 4921 const operand_ty = sema.typeOf(operand); 4922 if (operand_ty.zigTypeTag() != .ErrorUnion) 4923 return sema.fail(block, src, "expected error union type, found '{}'", .{operand_ty}); 4924 4925 const result_ty = operand_ty.errorUnionSet(); 4926 4927 if (try sema.resolveDefinedValue(block, src, operand)) |val| { 4928 assert(val.getError() != null); 4929 return sema.addConstant(result_ty, val); 4930 } 4931 4932 try sema.requireRuntimeBlock(block, src); 4933 return block.addTyOp(.unwrap_errunion_err, result_ty, operand); 4934} 4935 4936/// Pointer in, value out 4937fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 4938 const tracy = trace(@src()); 4939 defer tracy.end(); 4940 4941 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 4942 const src = inst_data.src(); 4943 const operand = sema.resolveInst(inst_data.operand); 4944 const operand_ty = sema.typeOf(operand); 4945 assert(operand_ty.zigTypeTag() == .Pointer); 4946 4947 if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) 4948 return sema.fail(block, src, "expected error union type, found {}", .{operand_ty.elemType()}); 4949 4950 const result_ty = operand_ty.elemType().errorUnionSet(); 4951 4952 if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { 4953 if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { 4954 assert(val.getError() != null); 4955 return sema.addConstant(result_ty, val); 4956 } 4957 } 4958 4959 try sema.requireRuntimeBlock(block,
src); 4960 return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand); 4961} 4962 4963fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 4964 const tracy = trace(@src()); 4965 defer tracy.end(); 4966 4967 const inst_data = sema.code.instructions.items(.data)[inst].un_tok; 4968 const src = inst_data.src(); 4969 const operand = sema.resolveInst(inst_data.operand); 4970 const operand_ty = sema.typeOf(operand); 4971 if (operand_ty.zigTypeTag() != .ErrorUnion) 4972 return sema.fail(block, src, "expected error union type, found '{}'", .{operand_ty}); 4973 if (operand_ty.errorUnionPayload().zigTypeTag() != .Void) { 4974 return sema.fail(block, src, "expression value is ignored", .{}); 4975 } 4976} 4977 4978fn zirFunc( 4979 sema: *Sema, 4980 block: *Block, 4981 inst: Zir.Inst.Index, 4982 inferred_error_set: bool, 4983) CompileError!Air.Inst.Ref { 4984 const tracy = trace(@src()); 4985 defer tracy.end(); 4986 4987 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 4988 const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index); 4989 var extra_index = extra.end; 4990 const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len]; 4991 extra_index += ret_ty_body.len; 4992 4993 var body_inst: Zir.Inst.Index = 0; 4994 var src_locs: Zir.Inst.Func.SrcLocs = undefined; 4995 if (extra.data.body_len != 0) { 4996 body_inst = inst; 4997 extra_index += extra.data.body_len; 4998 src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data; 4999 } 5000 5001 const cc: std.builtin.CallingConvention = if (sema.owner_decl.is_exported) 5002 .C 5003 else 5004 .Unspecified; 5005 5006 return sema.funcCommon( 5007 block, 5008 inst_data.src_node, 5009 body_inst, 5010 ret_ty_body, 5011 cc, 5012 Value.@"null", 5013 false, 5014 inferred_error_set, 5015 false, 5016 src_locs, 5017 null, 5018 ); 5019} 5020 5021fn funcCommon( 5022 sema: *Sema, 5023 block: *Block, 5024 src_node_offset: i32, 5025 body_inst: Zir.Inst.Index, 5026 ret_ty_body: []const Zir.Inst.Index, 5027 cc: std.builtin.CallingConvention, 5028 align_val: Value, 5029 var_args: bool, 5030 inferred_error_set: bool, 5031 is_extern: bool, 5032 src_locs: Zir.Inst.Func.SrcLocs, 5033 opt_lib_name: ?[]const u8, 5034) CompileError!Air.Inst.Ref { 5035 const src: LazySrcLoc = .{ .node_offset = src_node_offset }; 5036 const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; 5037 5038 // The return type body might be a type expression that depends on generic parameters. 5039 // In such case we need to use a generic_poison value for the return type and mark 5040 // the function as generic. 5041 var is_generic = false; 5042 const bare_return_type: Type = ret_ty: { 5043 if (ret_ty_body.len == 0) break :ret_ty Type.void; 5044 5045 const err = err: { 5046 // Make sure any nested param instructions don't clobber our work. 5047 const prev_params = block.params; 5048 block.params = .{}; 5049 defer { 5050 block.params.deinit(sema.gpa); 5051 block.params = prev_params; 5052 } 5053 if (sema.resolveBody(block, ret_ty_body)) |ret_ty_inst| { 5054 if (sema.analyzeAsType(block, ret_ty_src, ret_ty_inst)) |ret_ty| { 5055 break :ret_ty ret_ty; 5056 } else |err| break :err err; 5057 } else |err| break :err err; 5058 }; 5059 switch (err) { 5060 error.GenericPoison => { 5061 // The type is not available until the generic instantiation. 
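                // Illustrative example of a return type body that triggers
                // GenericPoison (not part of the analysis itself):
                //
                //     fn first(comptime T: type, slice: []const T) T {
                //         return slice[0];
                //     }
                //
                // The `T` in the return type refers to a comptime parameter, so
                // the type cannot be resolved until a call site instantiates
                // the function; the prototype is marked generic instead.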
5062 is_generic = true; 5063 break :ret_ty Type.initTag(.generic_poison); 5064 }, 5065 else => |e| return e, 5066 } 5067 }; 5068 5069 const mod = sema.mod; 5070 5071 const new_func: *Module.Fn = new_func: { 5072 if (body_inst == 0) break :new_func undefined; 5073 if (sema.comptime_args_fn_inst == body_inst) { 5074 const new_func = sema.preallocated_new_func.?; 5075 sema.preallocated_new_func = null; // take ownership 5076 break :new_func new_func; 5077 } 5078 break :new_func try sema.gpa.create(Module.Fn); 5079 }; 5080 errdefer if (body_inst != 0) sema.gpa.destroy(new_func); 5081 5082 const fn_ty: Type = fn_ty: { 5083 // Hot path for some common function types. 5084 // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated. 5085 if (!is_generic and block.params.items.len == 0 and !var_args and 5086 align_val.tag() == .null_value and !inferred_error_set) 5087 { 5088 if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) { 5089 break :fn_ty Type.initTag(.fn_noreturn_no_args); 5090 } 5091 5092 if (bare_return_type.zigTypeTag() == .Void and cc == .Unspecified) { 5093 break :fn_ty Type.initTag(.fn_void_no_args); 5094 } 5095 5096 if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Naked) { 5097 break :fn_ty Type.initTag(.fn_naked_noreturn_no_args); 5098 } 5099 5100 if (bare_return_type.zigTypeTag() == .Void and cc == .C) { 5101 break :fn_ty Type.initTag(.fn_ccc_void_no_args); 5102 } 5103 } 5104 5105 const param_types = try sema.arena.alloc(Type, block.params.items.len); 5106 const comptime_params = try sema.arena.alloc(bool, block.params.items.len); 5107 for (block.params.items) |param, i| { 5108 param_types[i] = param.ty; 5109 comptime_params[i] = param.is_comptime; 5110 is_generic = is_generic or param.is_comptime or 5111 param.ty.tag() == .generic_poison or param.ty.requiresComptime(); 5112 } 5113 5114 if (align_val.tag() != .null_value) { 5115 return sema.fail(block, src, "TODO implement support for function prototypes to have alignment specified", .{}); 5116 } 5117 5118 is_generic = is_generic or bare_return_type.requiresComptime(); 5119 5120 const return_type = if (!inferred_error_set or bare_return_type.tag() == .generic_poison) 5121 bare_return_type 5122 else blk: { 5123 const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, .{ 5124 .func = new_func, 5125 .map = .{}, 5126 .functions = .{}, 5127 .is_anyerror = false, 5128 }); 5129 break :blk try Type.Tag.error_union.create(sema.arena, .{ 5130 .error_set = error_set_ty, 5131 .payload = bare_return_type, 5132 }); 5133 }; 5134 5135 break :fn_ty try Type.Tag.function.create(sema.arena, .{ 5136 .param_types = param_types, 5137 .comptime_params = comptime_params.ptr, 5138 .return_type = return_type, 5139 .cc = cc, 5140 .is_var_args = var_args, 5141 .is_generic = is_generic, 5142 }); 5143 }; 5144 5145 if (opt_lib_name) |lib_name| blk: { 5146 const lib_name_src: LazySrcLoc = .{ .node_offset_lib_name = src_node_offset }; 5147 log.debug("extern fn symbol expected in lib '{s}'", .{lib_name}); 5148 mod.comp.stage1AddLinkLib(lib_name) catch |err| { 5149 return sema.fail(block, lib_name_src, "unable to add link lib '{s}': {s}", .{ 5150 lib_name, @errorName(err), 5151 }); 5152 }; 5153 const target = mod.getTarget(); 5154 if (target_util.is_libc_lib_name(target, lib_name)) { 5155 if (!mod.comp.bin_file.options.link_libc) { 5156 return sema.fail( 5157 block, 5158 lib_name_src, 5159 "dependency on libc must be explicitly specified in the build command", 5160 .{}, 5161 ); 5162 } 5163 break 
:blk; 5164 } 5165 if (target_util.is_libcpp_lib_name(target, lib_name)) { 5166 if (!mod.comp.bin_file.options.link_libcpp) { 5167 return sema.fail( 5168 block, 5169 lib_name_src, 5170 "dependency on libc++ must be explicitly specified in the build command", 5171 .{}, 5172 ); 5173 } 5174 break :blk; 5175 } 5176 if (!target.isWasm() and !mod.comp.bin_file.options.pic) { 5177 return sema.fail( 5178 block, 5179 lib_name_src, 5180 "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.", 5181 .{ lib_name, lib_name }, 5182 ); 5183 } 5184 } 5185 5186 if (is_extern) { 5187 return sema.addConstant( 5188 fn_ty, 5189 try Value.Tag.extern_fn.create(sema.arena, sema.owner_decl), 5190 ); 5191 } 5192 5193 if (body_inst == 0) { 5194 const fn_ptr_ty = try Type.ptr(sema.arena, .{ 5195 .pointee_type = fn_ty, 5196 .@"addrspace" = .generic, 5197 .mutable = false, 5198 }); 5199 return sema.addType(fn_ptr_ty); 5200 } 5201 5202 const is_inline = fn_ty.fnCallingConvention() == .Inline; 5203 const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .queued; 5204 5205 const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == body_inst) blk: { 5206 break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr; 5207 } else null; 5208 5209 const fn_payload = try sema.arena.create(Value.Payload.Function); 5210 new_func.* = .{ 5211 .state = anal_state, 5212 .zir_body_inst = body_inst, 5213 .owner_decl = sema.owner_decl, 5214 .comptime_args = comptime_args, 5215 .lbrace_line = src_locs.lbrace_line, 5216 .rbrace_line = src_locs.rbrace_line, 5217 .lbrace_column = @truncate(u16, src_locs.columns), 5218 .rbrace_column = @truncate(u16, src_locs.columns >> 16), 5219 }; 5220 fn_payload.* = .{ 5221 .base = .{ .tag = .function }, 5222 .data = new_func, 5223 }; 5224 return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base)); 5225} 5226 5227fn zirParam( 5228 sema: *Sema, 5229 block: *Block, 5230 inst: Zir.Inst.Index, 5231 is_comptime: bool, 5232) CompileError!void { 5233 const inst_data = sema.code.instructions.items(.data)[inst].pl_tok; 5234 const src = inst_data.src(); 5235 const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index); 5236 const param_name = sema.code.nullTerminatedString(extra.data.name); 5237 const body = sema.code.extra[extra.end..][0..extra.data.body_len]; 5238 5239 // TODO check if param_name shadows a Decl. This only needs to be done if 5240 // usingnamespace is implemented. 5241 _ = param_name; 5242 5243 // We could be in a generic function instantiation, or we could be evaluating a generic 5244 // function without any comptime args provided. 5245 const param_ty = param_ty: { 5246 const err = err: { 5247 // Make sure any nested param instructions don't clobber our work. 5248 const prev_params = block.params; 5249 block.params = .{}; 5250 defer { 5251 block.params.deinit(sema.gpa); 5252 block.params = prev_params; 5253 } 5254 5255 if (sema.resolveBody(block, body)) |param_ty_inst| { 5256 if (sema.analyzeAsType(block, src, param_ty_inst)) |param_ty| { 5257 break :param_ty param_ty; 5258 } else |err| break :err err; 5259 } else |err| break :err err; 5260 }; 5261 switch (err) { 5262 error.GenericPoison => { 5263 // The type is not available until the generic instantiation. 5264 // We result the param instruction with a poison value and 5265 // insert an anytype parameter. 
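                // Illustrative source that reaches this branch (not part of the
                // analysis): the type of `x` below depends on the earlier
                // comptime parameter, so while analyzing the uninstantiated
                // generic function it resolves to GenericPoison and is recorded
                // like an `anytype` parameter:
                //
                //     fn id(comptime T: type, x: T) T {
                //         return x;
                //     }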
5266 try block.params.append(sema.gpa, .{ 5267 .ty = Type.initTag(.generic_poison), 5268 .is_comptime = is_comptime, 5269 }); 5270 try sema.inst_map.putNoClobber(sema.gpa, inst, .generic_poison); 5271 return; 5272 }, 5273 else => |e| return e, 5274 } 5275 }; 5276 if (sema.inst_map.get(inst)) |arg| { 5277 if (is_comptime or param_ty.requiresComptime()) { 5278 // We have a comptime value for this parameter so it should be elided from the 5279 // function type of the function instruction in this block. 5280 const coerced_arg = try sema.coerce(block, param_ty, arg, src); 5281 sema.inst_map.putAssumeCapacity(inst, coerced_arg); 5282 return; 5283 } 5284 // Even though a comptime argument is provided, the generic function wants to treat 5285 // this as a runtime parameter. 5286 assert(sema.inst_map.remove(inst)); 5287 } 5288 5289 try block.params.append(sema.gpa, .{ 5290 .ty = param_ty, 5291 .is_comptime = is_comptime or param_ty.requiresComptime(), 5292 }); 5293 const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison)); 5294 try sema.inst_map.putNoClobber(sema.gpa, inst, result); 5295} 5296 5297fn zirParamAnytype( 5298 sema: *Sema, 5299 block: *Block, 5300 inst: Zir.Inst.Index, 5301 is_comptime: bool, 5302) CompileError!void { 5303 const inst_data = sema.code.instructions.items(.data)[inst].str_tok; 5304 const param_name = inst_data.get(sema.code); 5305 5306 // TODO check if param_name shadows a Decl. This only needs to be done if 5307 // usingnamespace is implemented. 5308 _ = param_name; 5309 5310 if (sema.inst_map.get(inst)) |air_ref| { 5311 const param_ty = sema.typeOf(air_ref); 5312 if (is_comptime or param_ty.requiresComptime()) { 5313 // We have a comptime value for this parameter so it should be elided from the 5314 // function type of the function instruction in this block. 5315 return; 5316 } 5317 // The map is already populated but we do need to add a runtime parameter. 5318 try block.params.append(sema.gpa, .{ 5319 .ty = param_ty, 5320 .is_comptime = false, 5321 }); 5322 return; 5323 } 5324 5325 // We are evaluating a generic function without any comptime args provided. 
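    // Illustrative example (not part of the analysis): an `anytype` parameter
    // such as
    //
    //     fn dump(value: anytype) void { ... }
    //
    // has no concrete type until each call site instantiates the function, so
    // the uninstantiated analysis records it below with the generic_poison type
    // and maps the instruction to a poison value.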
5326 5327 try block.params.append(sema.gpa, .{ 5328 .ty = Type.initTag(.generic_poison), 5329 .is_comptime = is_comptime, 5330 }); 5331 try sema.inst_map.put(sema.gpa, inst, .generic_poison); 5332} 5333 5334fn zirAs(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5335 const tracy = trace(@src()); 5336 defer tracy.end(); 5337 5338 const bin_inst = sema.code.instructions.items(.data)[inst].bin; 5339 return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); 5340} 5341 5342fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5343 const tracy = trace(@src()); 5344 defer tracy.end(); 5345 5346 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5347 const src = inst_data.src(); 5348 const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data; 5349 return sema.analyzeAs(block, src, extra.dest_type, extra.operand); 5350} 5351 5352fn analyzeAs( 5353 sema: *Sema, 5354 block: *Block, 5355 src: LazySrcLoc, 5356 zir_dest_type: Zir.Inst.Ref, 5357 zir_operand: Zir.Inst.Ref, 5358) CompileError!Air.Inst.Ref { 5359 const dest_ty = try sema.resolveType(block, src, zir_dest_type); 5360 const operand = sema.resolveInst(zir_operand); 5361 return sema.coerce(block, dest_ty, operand, src); 5362} 5363 5364fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5365 const tracy = trace(@src()); 5366 defer tracy.end(); 5367 5368 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 5369 const ptr = sema.resolveInst(inst_data.operand); 5370 const ptr_ty = sema.typeOf(ptr); 5371 if (ptr_ty.zigTypeTag() != .Pointer) { 5372 const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 5373 return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}); 5374 } 5375 // TODO handle known-pointer-address 5376 const src = inst_data.src(); 5377 try sema.requireRuntimeBlock(block, src); 5378 return block.addUnOp(.ptrtoint, ptr); 5379} 5380 5381fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5382 const tracy = trace(@src()); 5383 defer tracy.end(); 5384 5385 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5386 const src = inst_data.src(); 5387 const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; 5388 const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; 5389 const field_name = sema.code.nullTerminatedString(extra.field_name_start); 5390 const object = sema.resolveInst(extra.lhs); 5391 return sema.fieldVal(block, src, object, field_name, field_name_src); 5392} 5393 5394fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5395 const tracy = trace(@src()); 5396 defer tracy.end(); 5397 5398 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5399 const src = inst_data.src(); 5400 const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; 5401 const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; 5402 const field_name = sema.code.nullTerminatedString(extra.field_name_start); 5403 const object_ptr = sema.resolveInst(extra.lhs); 5404 return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src); 5405} 5406 5407fn zirFieldCallBind(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5408 const tracy = trace(@src()); 5409 defer tracy.end(); 5410 5411 const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; 5412 const src = inst_data.src(); 5413 const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; 5414 const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; 5415 const field_name = sema.code.nullTerminatedString(extra.field_name_start); 5416 const object_ptr = sema.resolveInst(extra.lhs); 5417 return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src); 5418} 5419 5420fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5421 const tracy = trace(@src()); 5422 defer tracy.end(); 5423 5424 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5425 const src = inst_data.src(); 5426 const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 5427 const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; 5428 const object = sema.resolveInst(extra.lhs); 5429 const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); 5430 return sema.fieldVal(block, src, object, field_name, field_name_src); 5431} 5432 5433fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5434 const tracy = trace(@src()); 5435 defer tracy.end(); 5436 5437 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5438 const src = inst_data.src(); 5439 const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 5440 const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; 5441 const object_ptr = sema.resolveInst(extra.lhs); 5442 const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); 5443 return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src); 5444} 5445 5446fn zirFieldCallBindNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5447 const tracy = trace(@src()); 5448 defer tracy.end(); 5449 5450 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5451 const src = inst_data.src(); 5452 const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 5453 const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; 5454 const object_ptr = sema.resolveInst(extra.lhs); 5455 const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); 5456 return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src); 5457} 5458 5459fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5460 const tracy = trace(@src()); 5461 defer tracy.end(); 5462 5463 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5464 const src = inst_data.src(); 5465 const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 5466 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 5467 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 5468 5469 const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); 5470 const operand = sema.resolveInst(extra.rhs); 5471 5472 const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty); 5473 _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); 5474 5475 if (try sema.isComptimeKnown(block, operand_src, operand)) { 5476 return sema.coerce(block, dest_ty, operand, 
operand_src); 5477 } else if (dest_is_comptime_int) { 5478 return sema.fail(block, src, "unable to cast runtime value to 'comptime_int'", .{}); 5479 } 5480 5481 try sema.requireRuntimeBlock(block, operand_src); 5482 // TODO insert safety check to make sure the value fits in the dest type 5483 return block.addTyOp(.intcast, dest_ty, operand); 5484} 5485 5486fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5487 const tracy = trace(@src()); 5488 defer tracy.end(); 5489 5490 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5491 const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 5492 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 5493 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 5494 5495 const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); 5496 const operand = sema.resolveInst(extra.rhs); 5497 return sema.bitCast(block, dest_ty, operand, operand_src); 5498} 5499 5500fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5501 const tracy = trace(@src()); 5502 defer tracy.end(); 5503 5504 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5505 const src = inst_data.src(); 5506 const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 5507 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 5508 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 5509 5510 const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); 5511 const operand = sema.resolveInst(extra.rhs); 5512 5513 const dest_is_comptime_float = switch (dest_ty.zigTypeTag()) { 5514 .ComptimeFloat => true, 5515 .Float => false, 5516 else => return sema.fail( 5517 block, 5518 dest_ty_src, 5519 "expected float type, found '{}'", 5520 .{dest_ty}, 5521 ), 5522 }; 5523 5524 const operand_ty = sema.typeOf(operand); 5525 switch (operand_ty.zigTypeTag()) { 5526 .ComptimeFloat, .Float, .ComptimeInt => {}, 5527 else => return sema.fail( 5528 block, 5529 operand_src, 5530 "expected float type, found '{}'", 5531 .{operand_ty}, 5532 ), 5533 } 5534 5535 if (try sema.isComptimeKnown(block, operand_src, operand)) { 5536 return sema.coerce(block, dest_ty, operand, operand_src); 5537 } 5538 if (dest_is_comptime_float) { 5539 return sema.fail(block, src, "unable to cast runtime value to 'comptime_float'", .{}); 5540 } 5541 const target = sema.mod.getTarget(); 5542 const src_bits = operand_ty.floatBits(target); 5543 const dst_bits = dest_ty.floatBits(target); 5544 if (dst_bits >= src_bits) { 5545 return sema.coerce(block, dest_ty, operand, operand_src); 5546 } 5547 try sema.requireRuntimeBlock(block, operand_src); 5548 return block.addTyOp(.fptrunc, dest_ty, operand); 5549} 5550 5551fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5552 const tracy = trace(@src()); 5553 defer tracy.end(); 5554 5555 const bin_inst = sema.code.instructions.items(.data)[inst].bin; 5556 const array = sema.resolveInst(bin_inst.lhs); 5557 const elem_index = sema.resolveInst(bin_inst.rhs); 5558 return sema.elemVal(block, sema.src, array, elem_index, sema.src); 5559} 5560 5561fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5562 const tracy = trace(@src()); 5563 defer tracy.end(); 5564 5565 const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; 5566 const src = inst_data.src(); 5567 const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; 5568 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 5569 const array = sema.resolveInst(extra.lhs); 5570 const elem_index = sema.resolveInst(extra.rhs); 5571 return sema.elemVal(block, src, array, elem_index, elem_index_src); 5572} 5573 5574fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5575 const tracy = trace(@src()); 5576 defer tracy.end(); 5577 5578 const bin_inst = sema.code.instructions.items(.data)[inst].bin; 5579 const array_ptr = sema.resolveInst(bin_inst.lhs); 5580 const elem_index = sema.resolveInst(bin_inst.rhs); 5581 return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); 5582} 5583 5584fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5585 const tracy = trace(@src()); 5586 defer tracy.end(); 5587 5588 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5589 const src = inst_data.src(); 5590 const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; 5591 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 5592 const array_ptr = sema.resolveInst(extra.lhs); 5593 const elem_index = sema.resolveInst(extra.rhs); 5594 return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); 5595} 5596 5597fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5598 const tracy = trace(@src()); 5599 defer tracy.end(); 5600 5601 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5602 const src = inst_data.src(); 5603 const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data; 5604 const array_ptr = sema.resolveInst(extra.ptr); 5605 const elem_index = try sema.addIntUnsigned(Type.usize, extra.index); 5606 return sema.elemPtr(block, src, array_ptr, elem_index, src); 5607} 5608 5609fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5610 const tracy = trace(@src()); 5611 defer tracy.end(); 5612 5613 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5614 const src = inst_data.src(); 5615 const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data; 5616 const array_ptr = sema.resolveInst(extra.lhs); 5617 const start = sema.resolveInst(extra.start); 5618 5619 return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded); 5620} 5621 5622fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5623 const tracy = trace(@src()); 5624 defer tracy.end(); 5625 5626 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5627 const src = inst_data.src(); 5628 const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data; 5629 const array_ptr = sema.resolveInst(extra.lhs); 5630 const start = sema.resolveInst(extra.start); 5631 const end = sema.resolveInst(extra.end); 5632 5633 return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded); 5634} 5635 5636fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5637 const tracy = trace(@src()); 5638 defer tracy.end(); 5639 5640 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5641 const src = inst_data.src(); 5642 const 
sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node }; 5643 const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data; 5644 const array_ptr = sema.resolveInst(extra.lhs); 5645 const start = sema.resolveInst(extra.start); 5646 const end = sema.resolveInst(extra.end); 5647 const sentinel = sema.resolveInst(extra.sentinel); 5648 5649 return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src); 5650} 5651 5652fn zirSwitchCapture( 5653 sema: *Sema, 5654 block: *Block, 5655 inst: Zir.Inst.Index, 5656 is_multi: bool, 5657 is_ref: bool, 5658) CompileError!Air.Inst.Ref { 5659 const tracy = trace(@src()); 5660 defer tracy.end(); 5661 5662 const zir_datas = sema.code.instructions.items(.data); 5663 const capture_info = zir_datas[inst].switch_capture; 5664 const switch_info = zir_datas[capture_info.switch_inst].pl_node; 5665 const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index); 5666 const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_info.src_node }; 5667 const switch_src = switch_info.src(); 5668 const operand_is_ref = switch_extra.data.bits.is_ref; 5669 const cond_inst = Zir.refToIndex(switch_extra.data.operand).?; 5670 const cond_info = sema.code.instructions.items(.data)[cond_inst].un_node; 5671 const operand_ptr = sema.resolveInst(cond_info.operand); 5672 const operand_ptr_ty = sema.typeOf(operand_ptr); 5673 const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty; 5674 5675 if (is_multi) { 5676 return sema.fail(block, switch_src, "TODO implement Sema for switch capture multi", .{}); 5677 } 5678 const scalar_prong = switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index); 5679 const item = sema.resolveInst(scalar_prong.item); 5680 // Previous switch validation ensured this will succeed 5681 const item_val = sema.resolveConstValue(block, .unneeded, item) catch unreachable; 5682 5683 switch (operand_ty.zigTypeTag()) { 5684 .Union => { 5685 const union_obj = operand_ty.cast(Type.Payload.Union).?.data; 5686 const enum_ty = union_obj.tag_ty; 5687 5688 const field_index_usize = enum_ty.enumTagFieldIndex(item_val).?; 5689 const field_index = @intCast(u32, field_index_usize); 5690 const field = union_obj.fields.values()[field_index]; 5691 5692 // TODO handle multiple union tags which have compatible types 5693 5694 if (is_ref) { 5695 assert(operand_is_ref); 5696 5697 const field_ty_ptr = try Type.ptr(sema.arena, .{ 5698 .pointee_type = field.ty, 5699 .@"addrspace" = .generic, 5700 .mutable = operand_ptr_ty.ptrIsMutable(), 5701 }); 5702 5703 if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { 5704 return sema.addConstant( 5705 field_ty_ptr, 5706 try Value.Tag.field_ptr.create(sema.arena, .{ 5707 .container_ptr = op_ptr_val, 5708 .field_index = field_index, 5709 }), 5710 ); 5711 } 5712 try sema.requireRuntimeBlock(block, operand_src); 5713 return block.addStructFieldPtr(operand_ptr, field_index, field_ty_ptr); 5714 } 5715 5716 const operand = if (operand_is_ref) 5717 try sema.analyzeLoad(block, operand_src, operand_ptr, operand_src) 5718 else 5719 operand_ptr; 5720 5721 if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| { 5722 return sema.addConstant( 5723 field.ty, 5724 operand_val.castTag(.@"union").?.data.val, 5725 ); 5726 } 5727 try sema.requireRuntimeBlock(block, operand_src); 5728 return block.addStructFieldVal(operand, field_index, field.ty); 5729 
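            // Illustrative source for this prong (not part of the analysis):
            // capturing the payload of a tagged union by value, e.g.
            //
            //     const U = union(enum) { int: i32, float: f32 };
            //     switch (u) {
            //         .int => |x| use(x),   // `x: i32`
            //         .float => |f| use(f), // `f: f32`
            //     }
            //
            // `use` is only a placeholder name for the example. When the operand
            // is runtime-known, the capture becomes the struct_field_val
            // instruction added above.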
}, 5730 .ErrorSet => { 5731 return sema.fail(block, operand_src, "TODO implement Sema for zirSwitchCapture for error sets", .{}); 5732 }, 5733 else => { 5734 return sema.fail(block, operand_src, "switch on type '{}' provides no capture value", .{ 5735 operand_ty, 5736 }); 5737 }, 5738 } 5739} 5740 5741fn zirSwitchCaptureElse( 5742 sema: *Sema, 5743 block: *Block, 5744 inst: Zir.Inst.Index, 5745 is_ref: bool, 5746) CompileError!Air.Inst.Ref { 5747 const tracy = trace(@src()); 5748 defer tracy.end(); 5749 5750 const zir_datas = sema.code.instructions.items(.data); 5751 const capture_info = zir_datas[inst].switch_capture; 5752 const switch_info = zir_datas[capture_info.switch_inst].pl_node; 5753 const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index).data; 5754 const src = switch_info.src(); 5755 const operand_is_ref = switch_extra.bits.is_ref; 5756 assert(!is_ref or operand_is_ref); 5757 5758 return sema.fail(block, src, "TODO implement Sema for zirSwitchCaptureElse", .{}); 5759} 5760 5761fn zirSwitchCond( 5762 sema: *Sema, 5763 block: *Block, 5764 inst: Zir.Inst.Index, 5765 is_ref: bool, 5766) CompileError!Air.Inst.Ref { 5767 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 5768 const src = inst_data.src(); 5769 const operand_ptr = sema.resolveInst(inst_data.operand); 5770 const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, src) else operand_ptr; 5771 const operand_ty = sema.typeOf(operand); 5772 5773 switch (operand_ty.zigTypeTag()) { 5774 .Type, 5775 .Void, 5776 .Bool, 5777 .Int, 5778 .Float, 5779 .ComptimeFloat, 5780 .ComptimeInt, 5781 .EnumLiteral, 5782 .Pointer, 5783 .Fn, 5784 .ErrorSet, 5785 .Enum, 5786 => { 5787 if ((try sema.typeHasOnePossibleValue(block, src, operand_ty))) |opv| { 5788 return sema.addConstant(operand_ty, opv); 5789 } 5790 return operand; 5791 }, 5792 5793 .Union => { 5794 const enum_ty = operand_ty.unionTagType() orelse { 5795 const msg = msg: { 5796 const msg = try sema.errMsg(block, src, "switch on untagged union", .{}); 5797 errdefer msg.destroy(sema.gpa); 5798 try sema.addDeclaredHereNote(msg, operand_ty); 5799 break :msg msg; 5800 }; 5801 return sema.failWithOwnedErrorMsg(msg); 5802 }; 5803 return sema.unionToTag(block, enum_ty, operand, src); 5804 }, 5805 5806 .ErrorUnion, 5807 .NoReturn, 5808 .Array, 5809 .Struct, 5810 .Undefined, 5811 .Null, 5812 .Optional, 5813 .BoundFn, 5814 .Opaque, 5815 .Vector, 5816 .Frame, 5817 .AnyFrame, 5818 => return sema.fail(block, src, "switch on type '{}'", .{operand_ty}), 5819 } 5820} 5821 5822fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 5823 const tracy = trace(@src()); 5824 defer tracy.end(); 5825 5826 const gpa = sema.gpa; 5827 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 5828 const src = inst_data.src(); 5829 const src_node_offset = inst_data.src_node; 5830 const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset }; 5831 const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset }; 5832 const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); 5833 5834 const operand = sema.resolveInst(extra.data.operand); 5835 5836 var header_extra_index: usize = extra.end; 5837 5838 const scalar_cases_len = extra.data.bits.scalar_cases_len; 5839 const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: { 5840 const multi_cases_len = sema.code.extra[header_extra_index]; 5841 header_extra_index += 1; 
5842 break :blk multi_cases_len; 5843 } else 0; 5844 5845 const special_prong = extra.data.bits.specialProng(); 5846 const special: struct { body: []const Zir.Inst.Index, end: usize } = switch (special_prong) { 5847 .none => .{ .body = &.{}, .end = header_extra_index }, 5848 .under, .@"else" => blk: { 5849 const body_len = sema.code.extra[header_extra_index]; 5850 const extra_body_start = header_extra_index + 1; 5851 break :blk .{ 5852 .body = sema.code.extra[extra_body_start..][0..body_len], 5853 .end = extra_body_start + body_len, 5854 }; 5855 }, 5856 }; 5857 5858 const operand_ty = sema.typeOf(operand); 5859 5860 // Validate usage of '_' prongs. 5861 if (special_prong == .under and !operand_ty.isNonexhaustiveEnum()) { 5862 const msg = msg: { 5863 const msg = try sema.errMsg( 5864 block, 5865 src, 5866 "'_' prong only allowed when switching on non-exhaustive enums", 5867 .{}, 5868 ); 5869 errdefer msg.destroy(gpa); 5870 try sema.errNote( 5871 block, 5872 special_prong_src, 5873 msg, 5874 "'_' prong here", 5875 .{}, 5876 ); 5877 break :msg msg; 5878 }; 5879 return sema.failWithOwnedErrorMsg(msg); 5880 } 5881 5882 // Validate for duplicate items, missing else prong, and invalid range. 5883 switch (operand_ty.zigTypeTag()) { 5884 .Enum => { 5885 var seen_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); 5886 defer gpa.free(seen_fields); 5887 5888 mem.set(?Module.SwitchProngSrc, seen_fields, null); 5889 5890 var extra_index: usize = special.end; 5891 { 5892 var scalar_i: u32 = 0; 5893 while (scalar_i < scalar_cases_len) : (scalar_i += 1) { 5894 const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 5895 extra_index += 1; 5896 const body_len = sema.code.extra[extra_index]; 5897 extra_index += 1; 5898 extra_index += body_len; 5899 5900 try sema.validateSwitchItemEnum( 5901 block, 5902 seen_fields, 5903 item_ref, 5904 src_node_offset, 5905 .{ .scalar = scalar_i }, 5906 ); 5907 } 5908 } 5909 { 5910 var multi_i: u32 = 0; 5911 while (multi_i < multi_cases_len) : (multi_i += 1) { 5912 const items_len = sema.code.extra[extra_index]; 5913 extra_index += 1; 5914 const ranges_len = sema.code.extra[extra_index]; 5915 extra_index += 1; 5916 const body_len = sema.code.extra[extra_index]; 5917 extra_index += 1; 5918 const items = sema.code.refSlice(extra_index, items_len); 5919 extra_index += items_len + body_len; 5920 5921 for (items) |item_ref, item_i| { 5922 try sema.validateSwitchItemEnum( 5923 block, 5924 seen_fields, 5925 item_ref, 5926 src_node_offset, 5927 .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, 5928 ); 5929 } 5930 5931 try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); 5932 } 5933 } 5934 const all_tags_handled = for (seen_fields) |seen_src| { 5935 if (seen_src == null) break false; 5936 } else true; 5937 5938 switch (special_prong) { 5939 .none => { 5940 if (!all_tags_handled) { 5941 const msg = msg: { 5942 const msg = try sema.errMsg( 5943 block, 5944 src, 5945 "switch must handle all possibilities", 5946 .{}, 5947 ); 5948 errdefer msg.destroy(sema.gpa); 5949 for (seen_fields) |seen_src, i| { 5950 if (seen_src != null) continue; 5951 5952 const field_name = operand_ty.enumFieldName(i); 5953 5954 // TODO have this point to the tag decl instead of here 5955 try sema.errNote( 5956 block, 5957 src, 5958 msg, 5959 "unhandled enumeration value: '{s}'", 5960 .{field_name}, 5961 ); 5962 } 5963 try sema.mod.errNoteNonLazy( 5964 operand_ty.declSrcLoc(), 5965 msg, 5966 "enum '{}' declared here", 5967 
.{operand_ty}, 5968 ); 5969 break :msg msg; 5970 }; 5971 return sema.failWithOwnedErrorMsg(msg); 5972 } 5973 }, 5974 .under => { 5975 if (all_tags_handled) return sema.fail( 5976 block, 5977 special_prong_src, 5978 "unreachable '_' prong; all cases already handled", 5979 .{}, 5980 ); 5981 }, 5982 .@"else" => { 5983 if (all_tags_handled) return sema.fail( 5984 block, 5985 special_prong_src, 5986 "unreachable else prong; all cases already handled", 5987 .{}, 5988 ); 5989 }, 5990 } 5991 }, 5992 5993 .ErrorSet => return sema.fail(block, src, "TODO validate switch .ErrorSet", .{}), 5994 .Union => return sema.fail(block, src, "TODO validate switch .Union", .{}), 5995 .Int, .ComptimeInt => { 5996 var range_set = RangeSet.init(gpa); 5997 defer range_set.deinit(); 5998 5999 var extra_index: usize = special.end; 6000 { 6001 var scalar_i: u32 = 0; 6002 while (scalar_i < scalar_cases_len) : (scalar_i += 1) { 6003 const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6004 extra_index += 1; 6005 const body_len = sema.code.extra[extra_index]; 6006 extra_index += 1; 6007 extra_index += body_len; 6008 6009 try sema.validateSwitchItem( 6010 block, 6011 &range_set, 6012 item_ref, 6013 operand_ty, 6014 src_node_offset, 6015 .{ .scalar = scalar_i }, 6016 ); 6017 } 6018 } 6019 { 6020 var multi_i: u32 = 0; 6021 while (multi_i < multi_cases_len) : (multi_i += 1) { 6022 const items_len = sema.code.extra[extra_index]; 6023 extra_index += 1; 6024 const ranges_len = sema.code.extra[extra_index]; 6025 extra_index += 1; 6026 const body_len = sema.code.extra[extra_index]; 6027 extra_index += 1; 6028 const items = sema.code.refSlice(extra_index, items_len); 6029 extra_index += items_len; 6030 6031 for (items) |item_ref, item_i| { 6032 try sema.validateSwitchItem( 6033 block, 6034 &range_set, 6035 item_ref, 6036 operand_ty, 6037 src_node_offset, 6038 .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, 6039 ); 6040 } 6041 6042 var range_i: u32 = 0; 6043 while (range_i < ranges_len) : (range_i += 1) { 6044 const item_first = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6045 extra_index += 1; 6046 const item_last = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6047 extra_index += 1; 6048 6049 try sema.validateSwitchRange( 6050 block, 6051 &range_set, 6052 item_first, 6053 item_last, 6054 operand_ty, 6055 src_node_offset, 6056 .{ .range = .{ .prong = multi_i, .item = range_i } }, 6057 ); 6058 } 6059 6060 extra_index += body_len; 6061 } 6062 } 6063 6064 check_range: { 6065 if (operand_ty.zigTypeTag() == .Int) { 6066 var arena = std.heap.ArenaAllocator.init(gpa); 6067 defer arena.deinit(); 6068 6069 const target = sema.mod.getTarget(); 6070 const min_int = try operand_ty.minInt(arena.allocator(), target); 6071 const max_int = try operand_ty.maxInt(arena.allocator(), target); 6072 if (try range_set.spans(min_int, max_int, operand_ty)) { 6073 if (special_prong == .@"else") { 6074 return sema.fail( 6075 block, 6076 special_prong_src, 6077 "unreachable else prong; all cases already handled", 6078 .{}, 6079 ); 6080 } 6081 break :check_range; 6082 } 6083 } 6084 if (special_prong != .@"else") { 6085 return sema.fail( 6086 block, 6087 src, 6088 "switch must handle all possibilities", 6089 .{}, 6090 ); 6091 } 6092 } 6093 }, 6094 .Bool => { 6095 var true_count: u8 = 0; 6096 var false_count: u8 = 0; 6097 6098 var extra_index: usize = special.end; 6099 { 6100 var scalar_i: u32 = 0; 6101 while (scalar_i < scalar_cases_len) : (scalar_i += 1) { 6102 const item_ref = 
@intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6103 extra_index += 1; 6104 const body_len = sema.code.extra[extra_index]; 6105 extra_index += 1; 6106 extra_index += body_len; 6107 6108 try sema.validateSwitchItemBool( 6109 block, 6110 &true_count, 6111 &false_count, 6112 item_ref, 6113 src_node_offset, 6114 .{ .scalar = scalar_i }, 6115 ); 6116 } 6117 } 6118 { 6119 var multi_i: u32 = 0; 6120 while (multi_i < multi_cases_len) : (multi_i += 1) { 6121 const items_len = sema.code.extra[extra_index]; 6122 extra_index += 1; 6123 const ranges_len = sema.code.extra[extra_index]; 6124 extra_index += 1; 6125 const body_len = sema.code.extra[extra_index]; 6126 extra_index += 1; 6127 const items = sema.code.refSlice(extra_index, items_len); 6128 extra_index += items_len + body_len; 6129 6130 for (items) |item_ref, item_i| { 6131 try sema.validateSwitchItemBool( 6132 block, 6133 &true_count, 6134 &false_count, 6135 item_ref, 6136 src_node_offset, 6137 .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, 6138 ); 6139 } 6140 6141 try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); 6142 } 6143 } 6144 switch (special_prong) { 6145 .@"else" => { 6146 if (true_count + false_count == 2) { 6147 return sema.fail( 6148 block, 6149 src, 6150 "unreachable else prong; all cases already handled", 6151 .{}, 6152 ); 6153 } 6154 }, 6155 .under, .none => { 6156 if (true_count + false_count < 2) { 6157 return sema.fail( 6158 block, 6159 src, 6160 "switch must handle all possibilities", 6161 .{}, 6162 ); 6163 } 6164 }, 6165 } 6166 }, 6167 .EnumLiteral, .Void, .Fn, .Pointer, .Type => { 6168 if (special_prong != .@"else") { 6169 return sema.fail( 6170 block, 6171 src, 6172 "else prong required when switching on type '{}'", 6173 .{operand_ty}, 6174 ); 6175 } 6176 6177 var seen_values = ValueSrcMap.initContext(gpa, .{ .ty = operand_ty }); 6178 defer seen_values.deinit(); 6179 6180 var extra_index: usize = special.end; 6181 { 6182 var scalar_i: u32 = 0; 6183 while (scalar_i < scalar_cases_len) : (scalar_i += 1) { 6184 const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6185 extra_index += 1; 6186 const body_len = sema.code.extra[extra_index]; 6187 extra_index += 1; 6188 extra_index += body_len; 6189 6190 try sema.validateSwitchItemSparse( 6191 block, 6192 &seen_values, 6193 item_ref, 6194 src_node_offset, 6195 .{ .scalar = scalar_i }, 6196 ); 6197 } 6198 } 6199 { 6200 var multi_i: u32 = 0; 6201 while (multi_i < multi_cases_len) : (multi_i += 1) { 6202 const items_len = sema.code.extra[extra_index]; 6203 extra_index += 1; 6204 const ranges_len = sema.code.extra[extra_index]; 6205 extra_index += 1; 6206 const body_len = sema.code.extra[extra_index]; 6207 extra_index += 1; 6208 const items = sema.code.refSlice(extra_index, items_len); 6209 extra_index += items_len + body_len; 6210 6211 for (items) |item_ref, item_i| { 6212 try sema.validateSwitchItemSparse( 6213 block, 6214 &seen_values, 6215 item_ref, 6216 src_node_offset, 6217 .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, 6218 ); 6219 } 6220 6221 try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); 6222 } 6223 } 6224 }, 6225 6226 .ErrorUnion, 6227 .NoReturn, 6228 .Array, 6229 .Struct, 6230 .Undefined, 6231 .Null, 6232 .Optional, 6233 .BoundFn, 6234 .Opaque, 6235 .Vector, 6236 .Frame, 6237 .AnyFrame, 6238 .ComptimeFloat, 6239 .Float, 6240 => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ 6241 operand_ty, 6242 }), 6243 } 6244 6245 
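    // Illustrative summary of what the validation above enforces for enums
    // (not exhaustive, and not part of the analysis itself):
    //
    //     const E = enum { a, b };
    //     switch (e) { .a => {} }                        // error: switch must handle all possibilities
    //     switch (e) { .a => {}, .b => {} }              // ok
    //     switch (e) { .a => {}, .b => {}, else => {} }  // error: unreachable else prong
    //     switch (e) { .a => {}, .a => {} }              // error: duplicate case (also validated above)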
const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); 6246 try sema.air_instructions.append(gpa, .{ 6247 .tag = .block, 6248 .data = undefined, 6249 }); 6250 var label: Block.Label = .{ 6251 .zir_block = inst, 6252 .merges = .{ 6253 .results = .{}, 6254 .br_list = .{}, 6255 .block_inst = block_inst, 6256 }, 6257 }; 6258 6259 var child_block: Block = .{ 6260 .parent = block, 6261 .sema = sema, 6262 .src_decl = block.src_decl, 6263 .namespace = block.namespace, 6264 .wip_capture_scope = block.wip_capture_scope, 6265 .instructions = .{}, 6266 .label = &label, 6267 .inlining = block.inlining, 6268 .is_comptime = block.is_comptime, 6269 }; 6270 const merges = &child_block.label.?.merges; 6271 defer child_block.instructions.deinit(gpa); 6272 defer merges.results.deinit(gpa); 6273 defer merges.br_list.deinit(gpa); 6274 6275 if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| { 6276 var extra_index: usize = special.end; 6277 { 6278 var scalar_i: usize = 0; 6279 while (scalar_i < scalar_cases_len) : (scalar_i += 1) { 6280 const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6281 extra_index += 1; 6282 const body_len = sema.code.extra[extra_index]; 6283 extra_index += 1; 6284 const body = sema.code.extra[extra_index..][0..body_len]; 6285 extra_index += body_len; 6286 6287 const item = sema.resolveInst(item_ref); 6288 // Validation above ensured these will succeed. 6289 const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; 6290 if (operand_val.eql(item_val, operand_ty)) { 6291 return sema.resolveBlockBody(block, src, &child_block, body, merges); 6292 } 6293 } 6294 } 6295 { 6296 var multi_i: usize = 0; 6297 while (multi_i < multi_cases_len) : (multi_i += 1) { 6298 const items_len = sema.code.extra[extra_index]; 6299 extra_index += 1; 6300 const ranges_len = sema.code.extra[extra_index]; 6301 extra_index += 1; 6302 const body_len = sema.code.extra[extra_index]; 6303 extra_index += 1; 6304 const items = sema.code.refSlice(extra_index, items_len); 6305 extra_index += items_len; 6306 const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..body_len]; 6307 6308 for (items) |item_ref| { 6309 const item = sema.resolveInst(item_ref); 6310 // Validation above ensured these will succeed. 6311 const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; 6312 if (operand_val.eql(item_val, operand_ty)) { 6313 return sema.resolveBlockBody(block, src, &child_block, body, merges); 6314 } 6315 } 6316 6317 var range_i: usize = 0; 6318 while (range_i < ranges_len) : (range_i += 1) { 6319 const item_first = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6320 extra_index += 1; 6321 const item_last = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6322 extra_index += 1; 6323 6324 // Validation above ensured these will succeed. 
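                    // A range prong matches the comptime operand when
                    // `first <= operand and operand <= last`, checked with
                    // `Value.compare` in the operand's type.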
6325 const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first) catch unreachable; 6326 const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last) catch unreachable; 6327 if (Value.compare(operand_val, .gte, first_tv.val, operand_ty) and 6328 Value.compare(operand_val, .lte, last_tv.val, operand_ty)) 6329 { 6330 return sema.resolveBlockBody(block, src, &child_block, body, merges); 6331 } 6332 } 6333 6334 extra_index += body_len; 6335 } 6336 } 6337 return sema.resolveBlockBody(block, src, &child_block, special.body, merges); 6338 } 6339 6340 if (scalar_cases_len + multi_cases_len == 0) { 6341 return sema.resolveBlockBody(block, src, &child_block, special.body, merges); 6342 } 6343 6344 try sema.requireRuntimeBlock(block, src); 6345 6346 const estimated_cases_extra = (scalar_cases_len + multi_cases_len) * 6347 @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2; 6348 var cases_extra = try std.ArrayListUnmanaged(u32).initCapacity(gpa, estimated_cases_extra); 6349 defer cases_extra.deinit(gpa); 6350 6351 var case_block = child_block.makeSubBlock(); 6352 case_block.runtime_loop = null; 6353 case_block.runtime_cond = operand_src; 6354 case_block.runtime_index += 1; 6355 defer case_block.instructions.deinit(gpa); 6356 6357 var extra_index: usize = special.end; 6358 6359 var scalar_i: usize = 0; 6360 while (scalar_i < scalar_cases_len) : (scalar_i += 1) { 6361 const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6362 extra_index += 1; 6363 const body_len = sema.code.extra[extra_index]; 6364 extra_index += 1; 6365 const body = sema.code.extra[extra_index..][0..body_len]; 6366 extra_index += body_len; 6367 6368 var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); 6369 defer wip_captures.deinit(); 6370 6371 case_block.instructions.shrinkRetainingCapacity(0); 6372 case_block.wip_capture_scope = wip_captures.scope; 6373 6374 const item = sema.resolveInst(item_ref); 6375 // `item` is already guaranteed to be constant known. 6376 6377 _ = try sema.analyzeBody(&case_block, body); 6378 6379 try wip_captures.finalize(); 6380 6381 try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); 6382 cases_extra.appendAssumeCapacity(1); // items_len 6383 cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); 6384 cases_extra.appendAssumeCapacity(@enumToInt(item)); 6385 cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); 6386 } 6387 6388 var is_first = true; 6389 var prev_cond_br: Air.Inst.Index = undefined; 6390 var first_else_body: []const Air.Inst.Index = &.{}; 6391 defer gpa.free(first_else_body); 6392 var prev_then_body: []const Air.Inst.Index = &.{}; 6393 defer gpa.free(prev_then_body); 6394 6395 var cases_len = scalar_cases_len; 6396 var multi_i: usize = 0; 6397 while (multi_i < multi_cases_len) : (multi_i += 1) { 6398 const items_len = sema.code.extra[extra_index]; 6399 extra_index += 1; 6400 const ranges_len = sema.code.extra[extra_index]; 6401 extra_index += 1; 6402 const body_len = sema.code.extra[extra_index]; 6403 extra_index += 1; 6404 const items = sema.code.refSlice(extra_index, items_len); 6405 extra_index += items_len; 6406 6407 case_block.instructions.shrinkRetainingCapacity(0); 6408 case_block.wip_capture_scope = child_block.wip_capture_scope; 6409 6410 var any_ok: Air.Inst.Ref = .none; 6411 6412 // If there are any ranges, we have to put all the items into the 6413 // else prong. 
Otherwise, we can take advantage of multiple items 6414 // mapping to the same body. 6415 if (ranges_len == 0) { 6416 cases_len += 1; 6417 6418 const body = sema.code.extra[extra_index..][0..body_len]; 6419 extra_index += body_len; 6420 _ = try sema.analyzeBody(&case_block, body); 6421 6422 try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + 6423 case_block.instructions.items.len); 6424 6425 cases_extra.appendAssumeCapacity(@intCast(u32, items.len)); 6426 cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); 6427 6428 for (items) |item_ref| { 6429 const item = sema.resolveInst(item_ref); 6430 cases_extra.appendAssumeCapacity(@enumToInt(item)); 6431 } 6432 6433 cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); 6434 } else { 6435 for (items) |item_ref| { 6436 const item = sema.resolveInst(item_ref); 6437 const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); 6438 if (any_ok != .none) { 6439 any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok); 6440 } else { 6441 any_ok = cmp_ok; 6442 } 6443 } 6444 6445 var range_i: usize = 0; 6446 while (range_i < ranges_len) : (range_i += 1) { 6447 const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6448 extra_index += 1; 6449 const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 6450 extra_index += 1; 6451 6452 const item_first = sema.resolveInst(first_ref); 6453 const item_last = sema.resolveInst(last_ref); 6454 6455 // operand >= first and operand <= last 6456 const range_first_ok = try case_block.addBinOp( 6457 .cmp_gte, 6458 operand, 6459 item_first, 6460 ); 6461 const range_last_ok = try case_block.addBinOp( 6462 .cmp_lte, 6463 operand, 6464 item_last, 6465 ); 6466 const range_ok = try case_block.addBinOp( 6467 .bool_and, 6468 range_first_ok, 6469 range_last_ok, 6470 ); 6471 if (any_ok != .none) { 6472 any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok); 6473 } else { 6474 any_ok = range_ok; 6475 } 6476 } 6477 6478 const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{ 6479 .pl_op = .{ 6480 .operand = any_ok, 6481 .payload = undefined, 6482 }, 6483 } }); 6484 var cond_body = case_block.instructions.toOwnedSlice(gpa); 6485 defer gpa.free(cond_body); 6486 6487 var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); 6488 defer wip_captures.deinit(); 6489 6490 case_block.instructions.shrinkRetainingCapacity(0); 6491 case_block.wip_capture_scope = wip_captures.scope; 6492 6493 const body = sema.code.extra[extra_index..][0..body_len]; 6494 extra_index += body_len; 6495 _ = try sema.analyzeBody(&case_block, body); 6496 6497 try wip_captures.finalize(); 6498 6499 if (is_first) { 6500 is_first = false; 6501 first_else_body = cond_body; 6502 cond_body = &.{}; 6503 } else { 6504 try sema.air_extra.ensureUnusedCapacity( 6505 gpa, 6506 @typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len, 6507 ); 6508 6509 sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = 6510 sema.addExtraAssumeCapacity(Air.CondBr{ 6511 .then_body_len = @intCast(u32, prev_then_body.len), 6512 .else_body_len = @intCast(u32, cond_body.len), 6513 }); 6514 sema.air_extra.appendSliceAssumeCapacity(prev_then_body); 6515 sema.air_extra.appendSliceAssumeCapacity(cond_body); 6516 } 6517 prev_then_body = case_block.instructions.toOwnedSlice(gpa); 6518 prev_cond_br = new_cond_br; 6519 } 6520 } 6521 6522 var final_else_body: []const Air.Inst.Index = &.{}; 6523 if (special.body.len != 
0 or !is_first) { 6524 var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); 6525 defer wip_captures.deinit(); 6526 6527 case_block.instructions.shrinkRetainingCapacity(0); 6528 case_block.wip_capture_scope = wip_captures.scope; 6529 6530 if (special.body.len != 0) { 6531 _ = try sema.analyzeBody(&case_block, special.body); 6532 } else { 6533 // We still need a terminator in this block, but we have proven 6534 // that it is unreachable. 6535 // TODO this should be a special safety panic other than unreachable, something 6536 // like "panic: switch operand had corrupt value not allowed by the type" 6537 try case_block.addUnreachable(src, true); 6538 } 6539 6540 try wip_captures.finalize(); 6541 6542 if (is_first) { 6543 final_else_body = case_block.instructions.items; 6544 } else { 6545 try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len + 6546 @typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len); 6547 6548 sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = 6549 sema.addExtraAssumeCapacity(Air.CondBr{ 6550 .then_body_len = @intCast(u32, prev_then_body.len), 6551 .else_body_len = @intCast(u32, case_block.instructions.items.len), 6552 }); 6553 sema.air_extra.appendSliceAssumeCapacity(prev_then_body); 6554 sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items); 6555 final_else_body = first_else_body; 6556 } 6557 } 6558 6559 try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len + 6560 cases_extra.items.len + final_else_body.len); 6561 6562 _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{ 6563 .operand = operand, 6564 .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ 6565 .cases_len = @intCast(u32, cases_len), 6566 .else_body_len = @intCast(u32, final_else_body.len), 6567 }), 6568 } } }); 6569 sema.air_extra.appendSliceAssumeCapacity(cases_extra.items); 6570 sema.air_extra.appendSliceAssumeCapacity(final_else_body); 6571 6572 return sema.analyzeBlockBody(block, src, &child_block, merges); 6573} 6574 6575fn resolveSwitchItemVal( 6576 sema: *Sema, 6577 block: *Block, 6578 item_ref: Zir.Inst.Ref, 6579 switch_node_offset: i32, 6580 switch_prong_src: Module.SwitchProngSrc, 6581 range_expand: Module.SwitchProngSrc.RangeExpand, 6582) CompileError!TypedValue { 6583 const item = sema.resolveInst(item_ref); 6584 const item_ty = sema.typeOf(item); 6585 // Constructing a LazySrcLoc is costly because we only have the switch AST node. 6586 // Only if we know for sure we need to report a compile error do we resolve the 6587 // full source locations. 
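    // First try the resolution with `.unneeded`; only if it fails with
    // `error.NeededSourceLocation` do we pay for `switch_prong_src.resolve` and retry
    // with the real source location so the error can point at the offending prong.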
6588 if (sema.resolveConstValue(block, .unneeded, item)) |val| { 6589 return TypedValue{ .ty = item_ty, .val = val }; 6590 } else |err| switch (err) { 6591 error.NeededSourceLocation => { 6592 const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); 6593 return TypedValue{ 6594 .ty = item_ty, 6595 .val = try sema.resolveConstValue(block, src, item), 6596 }; 6597 }, 6598 else => |e| return e, 6599 } 6600} 6601 6602fn validateSwitchRange( 6603 sema: *Sema, 6604 block: *Block, 6605 range_set: *RangeSet, 6606 first_ref: Zir.Inst.Ref, 6607 last_ref: Zir.Inst.Ref, 6608 operand_ty: Type, 6609 src_node_offset: i32, 6610 switch_prong_src: Module.SwitchProngSrc, 6611) CompileError!void { 6612 const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; 6613 const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; 6614 const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src); 6615 return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); 6616} 6617 6618fn validateSwitchItem( 6619 sema: *Sema, 6620 block: *Block, 6621 range_set: *RangeSet, 6622 item_ref: Zir.Inst.Ref, 6623 operand_ty: Type, 6624 src_node_offset: i32, 6625 switch_prong_src: Module.SwitchProngSrc, 6626) CompileError!void { 6627 const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; 6628 const maybe_prev_src = try range_set.add(item_val, item_val, operand_ty, switch_prong_src); 6629 return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); 6630} 6631 6632fn validateSwitchItemEnum( 6633 sema: *Sema, 6634 block: *Block, 6635 seen_fields: []?Module.SwitchProngSrc, 6636 item_ref: Zir.Inst.Ref, 6637 src_node_offset: i32, 6638 switch_prong_src: Module.SwitchProngSrc, 6639) CompileError!void { 6640 const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); 6641 const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val) orelse { 6642 const msg = msg: { 6643 const src = switch_prong_src.resolve(sema.gpa, block.src_decl, src_node_offset, .none); 6644 const msg = try sema.errMsg( 6645 block, 6646 src, 6647 "enum '{}' has no tag with value '{}'", 6648 .{ item_tv.ty, item_tv.val }, 6649 ); 6650 errdefer msg.destroy(sema.gpa); 6651 try sema.mod.errNoteNonLazy( 6652 item_tv.ty.declSrcLoc(), 6653 msg, 6654 "enum declared here", 6655 .{}, 6656 ); 6657 break :msg msg; 6658 }; 6659 return sema.failWithOwnedErrorMsg(msg); 6660 }; 6661 const maybe_prev_src = seen_fields[field_index]; 6662 seen_fields[field_index] = switch_prong_src; 6663 return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); 6664} 6665 6666fn validateSwitchDupe( 6667 sema: *Sema, 6668 block: *Block, 6669 maybe_prev_src: ?Module.SwitchProngSrc, 6670 switch_prong_src: Module.SwitchProngSrc, 6671 src_node_offset: i32, 6672) CompileError!void { 6673 const prev_prong_src = maybe_prev_src orelse return; 6674 const gpa = sema.gpa; 6675 const src = switch_prong_src.resolve(gpa, block.src_decl, src_node_offset, .none); 6676 const prev_src = prev_prong_src.resolve(gpa, block.src_decl, src_node_offset, .none); 6677 const msg = msg: { 6678 const msg = try sema.errMsg( 6679 block, 6680 src, 6681 "duplicate switch value", 6682 .{}, 6683 ); 6684 errdefer msg.destroy(sema.gpa); 6685 try sema.errNote( 6686 block, 6687 
prev_src, 6688 msg, 6689 "previous value here", 6690 .{}, 6691 ); 6692 break :msg msg; 6693 }; 6694 return sema.failWithOwnedErrorMsg(msg); 6695} 6696 6697fn validateSwitchItemBool( 6698 sema: *Sema, 6699 block: *Block, 6700 true_count: *u8, 6701 false_count: *u8, 6702 item_ref: Zir.Inst.Ref, 6703 src_node_offset: i32, 6704 switch_prong_src: Module.SwitchProngSrc, 6705) CompileError!void { 6706 const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; 6707 if (item_val.toBool()) { 6708 true_count.* += 1; 6709 } else { 6710 false_count.* += 1; 6711 } 6712 if (true_count.* + false_count.* > 2) { 6713 const src = switch_prong_src.resolve(sema.gpa, block.src_decl, src_node_offset, .none); 6714 return sema.fail(block, src, "duplicate switch value", .{}); 6715 } 6716} 6717 6718const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage); 6719 6720fn validateSwitchItemSparse( 6721 sema: *Sema, 6722 block: *Block, 6723 seen_values: *ValueSrcMap, 6724 item_ref: Zir.Inst.Ref, 6725 src_node_offset: i32, 6726 switch_prong_src: Module.SwitchProngSrc, 6727) CompileError!void { 6728 const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; 6729 const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return; 6730 return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset); 6731} 6732 6733fn validateSwitchNoRange( 6734 sema: *Sema, 6735 block: *Block, 6736 ranges_len: u32, 6737 operand_ty: Type, 6738 src_node_offset: i32, 6739) CompileError!void { 6740 if (ranges_len == 0) 6741 return; 6742 6743 const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset }; 6744 const range_src: LazySrcLoc = .{ .node_offset_switch_range = src_node_offset }; 6745 6746 const msg = msg: { 6747 const msg = try sema.errMsg( 6748 block, 6749 operand_src, 6750 "ranges not allowed when switching on type '{}'", 6751 .{operand_ty}, 6752 ); 6753 errdefer msg.destroy(sema.gpa); 6754 try sema.errNote( 6755 block, 6756 range_src, 6757 msg, 6758 "range here", 6759 .{}, 6760 ); 6761 break :msg msg; 6762 }; 6763 return sema.failWithOwnedErrorMsg(msg); 6764} 6765 6766fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 6767 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 6768 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 6769 const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 6770 const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 6771 const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs); 6772 const field_name = try sema.resolveConstString(block, name_src, extra.rhs); 6773 const ty = try sema.resolveTypeFields(block, ty_src, unresolved_ty); 6774 6775 const has_field = hf: { 6776 if (ty.isSlice()) { 6777 if (mem.eql(u8, field_name, "ptr")) break :hf true; 6778 if (mem.eql(u8, field_name, "len")) break :hf true; 6779 break :hf false; 6780 } 6781 break :hf switch (ty.zigTypeTag()) { 6782 .Struct => ty.structFields().contains(field_name), 6783 .Union => ty.unionFields().contains(field_name), 6784 .Enum => ty.enumFields().contains(field_name), 6785 .Array => mem.eql(u8, field_name, "len"), 6786 else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ 6787 ty, 6788 }), 6789 }; 6790 }; 6791 if (has_field) { 6792 
return Air.Inst.Ref.bool_true; 6793 } else { 6794 return Air.Inst.Ref.bool_false; 6795 } 6796} 6797 6798fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 6799 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 6800 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 6801 const src = inst_data.src(); 6802 const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 6803 const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 6804 const container_type = try sema.resolveType(block, lhs_src, extra.lhs); 6805 const decl_name = try sema.resolveConstString(block, rhs_src, extra.rhs); 6806 6807 const namespace = container_type.getNamespace() orelse return sema.fail( 6808 block, 6809 lhs_src, 6810 "expected struct, enum, union, or opaque, found '{}'", 6811 .{container_type}, 6812 ); 6813 if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| { 6814 if (decl.is_pub or decl.getFileScope() == block.getFileScope()) { 6815 return Air.Inst.Ref.bool_true; 6816 } 6817 } 6818 return Air.Inst.Ref.bool_false; 6819} 6820 6821fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 6822 const tracy = trace(@src()); 6823 defer tracy.end(); 6824 6825 const mod = sema.mod; 6826 const inst_data = sema.code.instructions.items(.data)[inst].str_tok; 6827 const operand_src = inst_data.src(); 6828 const operand = inst_data.get(sema.code); 6829 6830 const result = mod.importFile(block.getFileScope(), operand) catch |err| switch (err) { 6831 error.ImportOutsidePkgPath => { 6832 return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand}); 6833 }, 6834 else => { 6835 // TODO: these errors are file system errors; make sure an update() will 6836 // retry this and not cache the file system error, which may be transient. 6837 return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) }); 6838 }, 6839 }; 6840 try mod.semaFile(result.file); 6841 const file_root_decl = result.file.root_decl.?; 6842 try mod.declareDeclDependency(sema.owner_decl, file_root_decl); 6843 return sema.addConstant(file_root_decl.ty, file_root_decl.val); 6844} 6845 6846fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 6847 const tracy = trace(@src()); 6848 defer tracy.end(); 6849 6850 const mod = sema.mod; 6851 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 6852 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 6853 const name = try sema.resolveConstString(block, operand_src, inst_data.operand); 6854 6855 const embed_file = mod.embedFile(block.getFileScope(), name) catch |err| switch (err) { 6856 error.ImportOutsidePkgPath => { 6857 return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name}); 6858 }, 6859 else => { 6860 // TODO: these errors are file system errors; make sure an update() will 6861 // retry this and not cache the file system error, which may be transient. 6862 return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ name, @errorName(err) }); 6863 }, 6864 }; 6865 6866 var anon_decl = try block.startAnonDecl(); 6867 defer anon_decl.deinit(); 6868 6869 const bytes_including_null = embed_file.bytes[0 .. 
embed_file.bytes.len + 1]; 6870 6871 // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at 6872 // a `*Module.EmbedFile`. The purpose of this would be: 6873 // - If only the length is read and the bytes are not inspected by comptime code, 6874 // there can be an optimization where the codegen backend does a copy_file_range 6875 // into the final binary, and never loads the data into memory. 6876 // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. 6877 embed_file.owner_decl = try anon_decl.finish( 6878 try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len), 6879 try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), 6880 ); 6881 6882 return sema.analyzeDeclRef(embed_file.owner_decl); 6883} 6884 6885fn zirRetErrValueCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 6886 _ = block; 6887 _ = inst; 6888 return sema.fail(block, sema.src, "TODO implement zirRetErrValueCode", .{}); 6889} 6890 6891fn zirShl( 6892 sema: *Sema, 6893 block: *Block, 6894 inst: Zir.Inst.Index, 6895 air_tag: Air.Inst.Tag, 6896) CompileError!Air.Inst.Ref { 6897 const tracy = trace(@src()); 6898 defer tracy.end(); 6899 6900 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 6901 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 6902 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 6903 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 6904 const lhs = sema.resolveInst(extra.lhs); 6905 const rhs = sema.resolveInst(extra.rhs); 6906 6907 // TODO coerce rhs if air_tag is not shl_sat 6908 6909 const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs); 6910 const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs); 6911 6912 const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { 6913 const lhs_ty = sema.typeOf(lhs); 6914 6915 if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty); 6916 const rhs_val = maybe_rhs_val orelse break :rs rhs_src; 6917 if (rhs_val.isUndef()) return sema.addConstUndef(lhs_ty); 6918 6919 // If rhs is 0, return lhs without doing any calculations. 
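        // Otherwise both operands are comptime-known and the shift is folded here:
        // `shl_sat` via `Value.shlSat`, plain `shl` via `Value.shl`, while a
        // comptime `shl_exact` still reports a TODO compile error.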
6920 if (rhs_val.compareWithZero(.eq)) { 6921 return sema.addConstant(lhs_ty, lhs_val); 6922 } 6923 const val = switch (air_tag) { 6924 .shl_exact => return sema.fail(block, lhs_src, "TODO implement Sema for comptime shl_exact", .{}), 6925 .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod.getTarget()), 6926 .shl => try lhs_val.shl(rhs_val, sema.arena), 6927 else => unreachable, 6928 }; 6929 6930 return sema.addConstant(lhs_ty, val); 6931 } else rs: { 6932 if (maybe_rhs_val) |rhs_val| { 6933 if (rhs_val.isUndef()) return sema.addConstUndef(sema.typeOf(lhs)); 6934 } 6935 break :rs lhs_src; 6936 }; 6937 6938 // TODO: insert runtime safety check for shl_exact 6939 6940 try sema.requireRuntimeBlock(block, runtime_src); 6941 return block.addBinOp(air_tag, lhs, rhs); 6942} 6943 6944fn zirShr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 6945 const tracy = trace(@src()); 6946 defer tracy.end(); 6947 6948 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 6949 const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; 6950 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 6951 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 6952 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 6953 const lhs = sema.resolveInst(extra.lhs); 6954 const rhs = sema.resolveInst(extra.rhs); 6955 6956 if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { 6957 if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { 6958 const lhs_ty = sema.typeOf(lhs); 6959 if (lhs_val.isUndef() or rhs_val.isUndef()) { 6960 return sema.addConstUndef(lhs_ty); 6961 } 6962 // If rhs is 0, return lhs without doing any calculations. 
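            // Otherwise both operands are comptime-known and the result is folded via
            // `Value.shr`; runtime operands fall through to a runtime `.shr` below.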
6963 if (rhs_val.compareWithZero(.eq)) { 6964 return sema.addConstant(lhs_ty, lhs_val); 6965 } 6966 const val = try lhs_val.shr(rhs_val, sema.arena); 6967 return sema.addConstant(lhs_ty, val); 6968 } 6969 } 6970 6971 try sema.requireRuntimeBlock(block, src); 6972 return block.addBinOp(.shr, lhs, rhs); 6973} 6974 6975fn zirBitwise( 6976 sema: *Sema, 6977 block: *Block, 6978 inst: Zir.Inst.Index, 6979 air_tag: Air.Inst.Tag, 6980) CompileError!Air.Inst.Ref { 6981 const tracy = trace(@src()); 6982 defer tracy.end(); 6983 6984 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 6985 const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; 6986 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 6987 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 6988 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 6989 const lhs = sema.resolveInst(extra.lhs); 6990 const rhs = sema.resolveInst(extra.rhs); 6991 const lhs_ty = sema.typeOf(lhs); 6992 const rhs_ty = sema.typeOf(rhs); 6993 6994 const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; 6995 const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } }); 6996 const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); 6997 const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); 6998 6999 const scalar_type = if (resolved_type.zigTypeTag() == .Vector) 7000 resolved_type.elemType() 7001 else 7002 resolved_type; 7003 7004 const scalar_tag = scalar_type.zigTypeTag(); 7005 7006 if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { 7007 if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { 7008 return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{ 7009 lhs_ty.arrayLen(), 7010 rhs_ty.arrayLen(), 7011 }); 7012 } 7013 return sema.fail(block, src, "TODO implement support for vectors in zirBitwise", .{}); 7014 } else if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { 7015 return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ 7016 lhs_ty, 7017 rhs_ty, 7018 }); 7019 } 7020 7021 const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; 7022 7023 if (!is_int) { 7024 return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); 7025 } 7026 7027 if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { 7028 if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { 7029 const result_val = switch (air_tag) { 7030 .bit_and => try lhs_val.bitwiseAnd(rhs_val, sema.arena), 7031 .bit_or => try lhs_val.bitwiseOr(rhs_val, sema.arena), 7032 .xor => try lhs_val.bitwiseXor(rhs_val, sema.arena), 7033 else => unreachable, 7034 }; 7035 return sema.addConstant(scalar_type, result_val); 7036 } 7037 } 7038 7039 try sema.requireRuntimeBlock(block, src); 7040 return block.addBinOp(air_tag, casted_lhs, casted_rhs); 7041} 7042 7043fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 7044 const tracy = trace(@src()); 7045 defer tracy.end(); 7046 7047 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 7048 const src = inst_data.src(); 7049 const operand_src = src; // TODO put this on the operand, not the '~' 7050 7051 const operand = sema.resolveInst(inst_data.operand); 7052 const operand_type = 
sema.typeOf(operand); 7053 const scalar_type = operand_type.scalarType(); 7054 7055 if (scalar_type.zigTypeTag() != .Int) { 7056 return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{operand_type}); 7057 } 7058 7059 if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { 7060 const target = sema.mod.getTarget(); 7061 if (val.isUndef()) { 7062 return sema.addConstUndef(scalar_type); 7063 } else if (operand_type.zigTypeTag() == .Vector) { 7064 const vec_len = try sema.usizeCast(block, operand_src, operand_type.arrayLen()); 7065 var elem_val_buf: Value.ElemValueBuffer = undefined; 7066 const elems = try sema.arena.alloc(Value, vec_len); 7067 for (elems) |*elem, i| { 7068 const elem_val = val.elemValueBuffer(i, &elem_val_buf); 7069 elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target); 7070 } 7071 return sema.addConstant( 7072 operand_type, 7073 try Value.Tag.array.create(sema.arena, elems), 7074 ); 7075 } else { 7076 const result_val = try val.bitwiseNot(scalar_type, sema.arena, target); 7077 return sema.addConstant(scalar_type, result_val); 7078 } 7079 } 7080 7081 try sema.requireRuntimeBlock(block, src); 7082 return block.addTyOp(.not, operand_type, operand); 7083} 7084 7085fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 7086 const tracy = trace(@src()); 7087 defer tracy.end(); 7088 7089 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 7090 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 7091 const lhs = sema.resolveInst(extra.lhs); 7092 const rhs = sema.resolveInst(extra.rhs); 7093 const lhs_ty = sema.typeOf(lhs); 7094 const rhs_ty = sema.typeOf(rhs); 7095 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 7096 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 7097 7098 const lhs_info = getArrayCatInfo(lhs_ty) orelse 7099 return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty}); 7100 const rhs_info = getArrayCatInfo(rhs_ty) orelse 7101 return sema.fail(block, rhs_src, "expected array, found '{}'", .{rhs_ty}); 7102 if (!lhs_info.elem_type.eql(rhs_info.elem_type)) { 7103 return sema.fail(block, rhs_src, "expected array of type '{}', found '{}'", .{ lhs_info.elem_type, rhs_ty }); 7104 } 7105 7106 // When there is a sentinel mismatch, no sentinel on the result. The type system 7107 // will catch this if it is a problem. 7108 var res_sent: ?Value = null; 7109 if (rhs_info.sentinel != null and lhs_info.sentinel != null) { 7110 if (rhs_info.sentinel.?.eql(lhs_info.sentinel.?, lhs_info.elem_type)) { 7111 res_sent = lhs_info.sentinel.?; 7112 } 7113 } 7114 7115 if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { 7116 if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| { 7117 const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); 7118 const rhs_len = try sema.usizeCast(block, lhs_src, rhs_info.len); 7119 const final_len = lhs_len + rhs_len; 7120 const final_len_including_sent = final_len + @boolToInt(res_sent != null); 7121 const is_pointer = lhs_ty.zigTypeTag() == .Pointer; 7122 const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val; 7123 const rhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? 
else rhs_val; 7124 var anon_decl = try block.startAnonDecl(); 7125 defer anon_decl.deinit(); 7126 7127 const buf = try anon_decl.arena().alloc(Value, final_len_including_sent); 7128 { 7129 var i: usize = 0; 7130 while (i < lhs_len) : (i += 1) { 7131 const val = try lhs_sub_val.elemValue(sema.arena, i); 7132 buf[i] = try val.copy(anon_decl.arena()); 7133 } 7134 } 7135 { 7136 var i: usize = 0; 7137 while (i < rhs_len) : (i += 1) { 7138 const val = try rhs_sub_val.elemValue(sema.arena, i); 7139 buf[lhs_len + i] = try val.copy(anon_decl.arena()); 7140 } 7141 } 7142 const ty = if (res_sent) |rs| ty: { 7143 buf[final_len] = try rs.copy(anon_decl.arena()); 7144 break :ty try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ 7145 .len = final_len, 7146 .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()), 7147 .sentinel = try rs.copy(anon_decl.arena()), 7148 }); 7149 } else try Type.Tag.array.create(anon_decl.arena(), .{ 7150 .len = final_len, 7151 .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()), 7152 }); 7153 const val = try Value.Tag.array.create(anon_decl.arena(), buf); 7154 const decl = try anon_decl.finish(ty, val); 7155 if (is_pointer) { 7156 return sema.analyzeDeclRef(decl); 7157 } else { 7158 return sema.analyzeDeclVal(block, .unneeded, decl); 7159 } 7160 } else { 7161 return sema.fail(block, lhs_src, "TODO runtime array_cat", .{}); 7162 } 7163 } else { 7164 return sema.fail(block, lhs_src, "TODO runtime array_cat", .{}); 7165 } 7166} 7167 7168fn getArrayCatInfo(t: Type) ?Type.ArrayInfo { 7169 return switch (t.zigTypeTag()) { 7170 .Array => t.arrayInfo(), 7171 .Pointer => blk: { 7172 const ptrinfo = t.ptrInfo().data; 7173 if (ptrinfo.pointee_type.zigTypeTag() != .Array) return null; 7174 if (ptrinfo.size != .One) return null; 7175 break :blk ptrinfo.pointee_type.arrayInfo(); 7176 }, 7177 else => null, 7178 }; 7179} 7180 7181fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 7182 const tracy = trace(@src()); 7183 defer tracy.end(); 7184 7185 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 7186 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 7187 const lhs = sema.resolveInst(extra.lhs); 7188 const lhs_ty = sema.typeOf(lhs); 7189 const src: LazySrcLoc = inst_data.src(); 7190 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 7191 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 7192 7193 // In `**` rhs has to be comptime-known, but lhs can be runtime-known 7194 const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize); 7195 const mulinfo = getArrayCatInfo(lhs_ty) orelse 7196 return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty}); 7197 7198 const final_len_u64 = std.math.mul(u64, mulinfo.len, factor) catch 7199 return sema.fail(block, rhs_src, "operation results in overflow", .{}); 7200 7201 if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { 7202 const final_len = try sema.usizeCast(block, src, final_len_u64); 7203 const final_len_including_sent = final_len + @boolToInt(mulinfo.sentinel != null); 7204 const lhs_len = try sema.usizeCast(block, lhs_src, mulinfo.len); 7205 7206 const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? 
else lhs_val; 7207 7208 var anon_decl = try block.startAnonDecl(); 7209 defer anon_decl.deinit(); 7210 7211 const final_ty = if (mulinfo.sentinel) |sent| 7212 try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ 7213 .len = final_len, 7214 .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()), 7215 .sentinel = try sent.copy(anon_decl.arena()), 7216 }) 7217 else 7218 try Type.Tag.array.create(anon_decl.arena(), .{ 7219 .len = final_len, 7220 .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()), 7221 }); 7222 const buf = try anon_decl.arena().alloc(Value, final_len_including_sent); 7223 7224 // Optimization for the common pattern of a single element repeated N times, such 7225 // as zero-filling a byte array. 7226 const val = if (lhs_len == 1) blk: { 7227 const elem_val = try lhs_sub_val.elemValue(sema.arena, 0); 7228 const copied_val = try elem_val.copy(anon_decl.arena()); 7229 break :blk try Value.Tag.repeated.create(anon_decl.arena(), copied_val); 7230 } else blk: { 7231 // the actual loop 7232 var i: usize = 0; 7233 while (i < factor) : (i += 1) { 7234 var j: usize = 0; 7235 while (j < lhs_len) : (j += 1) { 7236 const val = try lhs_sub_val.elemValue(sema.arena, j); 7237 buf[lhs_len * i + j] = try val.copy(anon_decl.arena()); 7238 } 7239 } 7240 if (mulinfo.sentinel) |sent| { 7241 buf[final_len] = try sent.copy(anon_decl.arena()); 7242 } 7243 break :blk try Value.Tag.array.create(anon_decl.arena(), buf); 7244 }; 7245 const decl = try anon_decl.finish(final_ty, val); 7246 if (lhs_ty.zigTypeTag() == .Pointer) { 7247 return sema.analyzeDeclRef(decl); 7248 } else { 7249 return sema.analyzeDeclVal(block, .unneeded, decl); 7250 } 7251 } 7252 return sema.fail(block, lhs_src, "TODO runtime array_mul", .{}); 7253} 7254 7255fn zirNegate( 7256 sema: *Sema, 7257 block: *Block, 7258 inst: Zir.Inst.Index, 7259 tag_override: Zir.Inst.Tag, 7260) CompileError!Air.Inst.Ref { 7261 const tracy = trace(@src()); 7262 defer tracy.end(); 7263 7264 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 7265 const src = inst_data.src(); 7266 const lhs_src = src; 7267 const rhs_src = src; // TODO better source location 7268 const lhs = sema.resolveInst(.zero); 7269 const rhs = sema.resolveInst(inst_data.operand); 7270 7271 return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); 7272} 7273 7274fn zirArithmetic( 7275 sema: *Sema, 7276 block: *Block, 7277 inst: Zir.Inst.Index, 7278 zir_tag: Zir.Inst.Tag, 7279) CompileError!Air.Inst.Ref { 7280 const tracy = trace(@src()); 7281 defer tracy.end(); 7282 7283 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 7284 sema.src = .{ .node_offset_bin_op = inst_data.src_node }; 7285 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 7286 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 7287 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 7288 const lhs = sema.resolveInst(extra.lhs); 7289 const rhs = sema.resolveInst(extra.rhs); 7290 7291 return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src); 7292} 7293 7294fn zirOverflowArithmetic( 7295 sema: *Sema, 7296 block: *Block, 7297 extended: Zir.Inst.Extended.InstData, 7298) CompileError!Air.Inst.Ref { 7299 const tracy = trace(@src()); 7300 defer tracy.end(); 7301 7302 const extra = sema.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data; 7303 const src: LazySrcLoc = .{ .node_offset = extra.node }; 7304 7305 return sema.fail(block, src, 
"TODO implement Sema.zirOverflowArithmetic", .{}); 7306} 7307 7308fn analyzeArithmetic( 7309 sema: *Sema, 7310 block: *Block, 7311 /// TODO performance investigation: make this comptime? 7312 zir_tag: Zir.Inst.Tag, 7313 lhs: Air.Inst.Ref, 7314 rhs: Air.Inst.Ref, 7315 src: LazySrcLoc, 7316 lhs_src: LazySrcLoc, 7317 rhs_src: LazySrcLoc, 7318) CompileError!Air.Inst.Ref { 7319 const lhs_ty = sema.typeOf(lhs); 7320 const rhs_ty = sema.typeOf(rhs); 7321 const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); 7322 const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); 7323 if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) { 7324 if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { 7325 return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{ 7326 lhs_ty.arrayLen(), rhs_ty.arrayLen(), 7327 }); 7328 } 7329 return sema.fail(block, src, "TODO implement support for vectors in Sema.analyzeArithmetic", .{}); 7330 } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) { 7331 return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ 7332 lhs_ty, rhs_ty, 7333 }); 7334 } 7335 if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) { 7336 .One, .Slice => {}, 7337 .Many, .C => { 7338 const op_src = src; // TODO better source location 7339 const air_tag: Air.Inst.Tag = switch (zir_tag) { 7340 .add => .ptr_add, 7341 .sub => .ptr_sub, 7342 else => return sema.fail( 7343 block, 7344 op_src, 7345 "invalid pointer arithmetic operand: '{s}''", 7346 .{@tagName(zir_tag)}, 7347 ), 7348 }; 7349 return analyzePtrArithmetic(sema, block, op_src, lhs, rhs, air_tag, lhs_src, rhs_src); 7350 }, 7351 }; 7352 7353 const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; 7354 const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ 7355 .override = &[_]LazySrcLoc{ lhs_src, rhs_src }, 7356 }); 7357 const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); 7358 const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); 7359 7360 const scalar_type = if (resolved_type.zigTypeTag() == .Vector) 7361 resolved_type.elemType() 7362 else 7363 resolved_type; 7364 7365 const scalar_tag = scalar_type.zigTypeTag(); 7366 7367 const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; 7368 const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat; 7369 7370 if (!is_int and !(is_float and floatOpAllowed(zir_tag))) { 7371 return sema.fail(block, src, "invalid operands to binary expression: '{s}' and '{s}'", .{ 7372 @tagName(lhs_zig_ty_tag), @tagName(rhs_zig_ty_tag), 7373 }); 7374 } 7375 7376 const target = sema.mod.getTarget(); 7377 const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs); 7378 const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs); 7379 const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: { 7380 switch (zir_tag) { 7381 .add => { 7382 // For integers: 7383 // If either of the operands are zero, then the other operand is 7384 // returned, even if it is undefined. 7385 // If either of the operands are undefined, it's a compile error 7386 // because there is a possible value for which the addition would 7387 // overflow (max_int), causing illegal behavior. 7388 // For floats: either operand being undef makes the result undef. 
7389 if (maybe_lhs_val) |lhs_val| { 7390 if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { 7391 return casted_rhs; 7392 } 7393 } 7394 if (maybe_rhs_val) |rhs_val| { 7395 if (rhs_val.isUndef()) { 7396 if (is_int) { 7397 return sema.failWithUseOfUndef(block, rhs_src); 7398 } else { 7399 return sema.addConstUndef(scalar_type); 7400 } 7401 } 7402 if (rhs_val.compareWithZero(.eq)) { 7403 return casted_lhs; 7404 } 7405 } 7406 if (maybe_lhs_val) |lhs_val| { 7407 if (lhs_val.isUndef()) { 7408 if (is_int) { 7409 return sema.failWithUseOfUndef(block, lhs_src); 7410 } else { 7411 return sema.addConstUndef(scalar_type); 7412 } 7413 } 7414 if (maybe_rhs_val) |rhs_val| { 7415 if (is_int) { 7416 return sema.addConstant( 7417 scalar_type, 7418 try lhs_val.intAdd(rhs_val, sema.arena), 7419 ); 7420 } else { 7421 return sema.addConstant( 7422 scalar_type, 7423 try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena), 7424 ); 7425 } 7426 } else break :rs .{ .src = rhs_src, .air_tag = .add }; 7427 } else break :rs .{ .src = lhs_src, .air_tag = .add }; 7428 }, 7429 .addwrap => { 7430 // Integers only; floats are checked above. 7431 // If either of the operands are zero, the other operand is returned. 7432 // If either of the operands are undefined, the result is undefined. 7433 if (maybe_lhs_val) |lhs_val| { 7434 if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { 7435 return casted_rhs; 7436 } 7437 } 7438 if (maybe_rhs_val) |rhs_val| { 7439 if (rhs_val.isUndef()) { 7440 return sema.addConstUndef(scalar_type); 7441 } 7442 if (rhs_val.compareWithZero(.eq)) { 7443 return casted_lhs; 7444 } 7445 if (maybe_lhs_val) |lhs_val| { 7446 return sema.addConstant( 7447 scalar_type, 7448 try lhs_val.numberAddWrap(rhs_val, scalar_type, sema.arena, target), 7449 ); 7450 } else break :rs .{ .src = lhs_src, .air_tag = .addwrap }; 7451 } else break :rs .{ .src = rhs_src, .air_tag = .addwrap }; 7452 }, 7453 .add_sat => { 7454 // Integers only; floats are checked above. 7455 // If either of the operands are zero, then the other operand is returned. 7456 // If either of the operands are undefined, the result is undefined. 7457 if (maybe_lhs_val) |lhs_val| { 7458 if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { 7459 return casted_rhs; 7460 } 7461 } 7462 if (maybe_rhs_val) |rhs_val| { 7463 if (rhs_val.isUndef()) { 7464 return sema.addConstUndef(scalar_type); 7465 } 7466 if (rhs_val.compareWithZero(.eq)) { 7467 return casted_lhs; 7468 } 7469 if (maybe_lhs_val) |lhs_val| { 7470 return sema.addConstant( 7471 scalar_type, 7472 try lhs_val.intAddSat(rhs_val, scalar_type, sema.arena, target), 7473 ); 7474 } else break :rs .{ .src = lhs_src, .air_tag = .add_sat }; 7475 } else break :rs .{ .src = rhs_src, .air_tag = .add_sat }; 7476 }, 7477 .sub => { 7478 // For integers: 7479 // If the rhs is zero, then the other operand is 7480 // returned, even if it is undefined. 7481 // If either of the operands are undefined, it's a compile error 7482 // because there is a possible value for which the subtraction would 7483 // overflow, causing illegal behavior. 7484 // For floats: either operand being undef makes the result undef. 
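                // Note that only a comptime-known zero on the rhs short-circuits
                // (`x - 0` returns lhs unchanged); a zero lhs is not special-cased,
                // since `0 - x` still has to negate.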
7485 if (maybe_rhs_val) |rhs_val| { 7486 if (rhs_val.isUndef()) { 7487 if (is_int) { 7488 return sema.failWithUseOfUndef(block, rhs_src); 7489 } else { 7490 return sema.addConstUndef(scalar_type); 7491 } 7492 } 7493 if (rhs_val.compareWithZero(.eq)) { 7494 return casted_lhs; 7495 } 7496 } 7497 if (maybe_lhs_val) |lhs_val| { 7498 if (lhs_val.isUndef()) { 7499 if (is_int) { 7500 return sema.failWithUseOfUndef(block, lhs_src); 7501 } else { 7502 return sema.addConstUndef(scalar_type); 7503 } 7504 } 7505 if (maybe_rhs_val) |rhs_val| { 7506 if (is_int) { 7507 return sema.addConstant( 7508 scalar_type, 7509 try lhs_val.intSub(rhs_val, sema.arena), 7510 ); 7511 } else { 7512 return sema.addConstant( 7513 scalar_type, 7514 try lhs_val.floatSub(rhs_val, scalar_type, sema.arena), 7515 ); 7516 } 7517 } else break :rs .{ .src = rhs_src, .air_tag = .sub }; 7518 } else break :rs .{ .src = lhs_src, .air_tag = .sub }; 7519 }, 7520 .subwrap => { 7521 // Integers only; floats are checked above. 7522 // If the RHS is zero, then the other operand is returned, even if it is undefined. 7523 // If either of the operands are undefined, the result is undefined. 7524 if (maybe_rhs_val) |rhs_val| { 7525 if (rhs_val.isUndef()) { 7526 return sema.addConstUndef(scalar_type); 7527 } 7528 if (rhs_val.compareWithZero(.eq)) { 7529 return casted_lhs; 7530 } 7531 } 7532 if (maybe_lhs_val) |lhs_val| { 7533 if (lhs_val.isUndef()) { 7534 return sema.addConstUndef(scalar_type); 7535 } 7536 if (maybe_rhs_val) |rhs_val| { 7537 return sema.addConstant( 7538 scalar_type, 7539 try lhs_val.numberSubWrap(rhs_val, scalar_type, sema.arena, target), 7540 ); 7541 } else break :rs .{ .src = rhs_src, .air_tag = .subwrap }; 7542 } else break :rs .{ .src = lhs_src, .air_tag = .subwrap }; 7543 }, 7544 .sub_sat => { 7545 // Integers only; floats are checked above. 7546 // If the RHS is zero, result is LHS. 7547 // If either of the operands are undefined, result is undefined. 7548 if (maybe_rhs_val) |rhs_val| { 7549 if (rhs_val.isUndef()) { 7550 return sema.addConstUndef(scalar_type); 7551 } 7552 if (rhs_val.compareWithZero(.eq)) { 7553 return casted_lhs; 7554 } 7555 } 7556 if (maybe_lhs_val) |lhs_val| { 7557 if (lhs_val.isUndef()) { 7558 return sema.addConstUndef(scalar_type); 7559 } 7560 if (maybe_rhs_val) |rhs_val| { 7561 return sema.addConstant( 7562 scalar_type, 7563 try lhs_val.intSubSat(rhs_val, scalar_type, sema.arena, target), 7564 ); 7565 } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat }; 7566 } else break :rs .{ .src = lhs_src, .air_tag = .sub_sat }; 7567 }, 7568 .div => { 7569 // TODO: emit compile error when .div is used on integers and there would be an 7570 // ambiguous result between div_floor and div_trunc. 7571 7572 // For integers: 7573 // If the lhs is zero, then zero is returned regardless of rhs. 7574 // If the rhs is zero, compile error for division by zero. 7575 // If the rhs is undefined, compile error because there is a possible 7576 // value (zero) for which the division would be illegal behavior. 7577 // If the lhs is undefined: 7578 // * if lhs type is signed: 7579 // * if rhs is comptime-known and not -1, result is undefined 7580 // * if rhs is -1 or runtime-known, compile error because there is a 7581 // possible value (-min_int / -1) for which division would be 7582 // illegal behavior. 7583 // * if lhs type is unsigned, undef is returned regardless of rhs. 7584 // TODO: emit runtime safety for division by zero 7585 // 7586 // For floats: 7587 // If the rhs is zero, compile error for division by zero. 
7588 // If the rhs is undefined, compile error because there is a possible 7589 // value (zero) for which the division would be illegal behavior. 7590 // If the lhs is undefined, result is undefined. 7591 if (maybe_lhs_val) |lhs_val| { 7592 if (!lhs_val.isUndef()) { 7593 if (lhs_val.compareWithZero(.eq)) { 7594 return sema.addConstant(scalar_type, Value.zero); 7595 } 7596 } 7597 } 7598 if (maybe_rhs_val) |rhs_val| { 7599 if (rhs_val.isUndef()) { 7600 return sema.failWithUseOfUndef(block, rhs_src); 7601 } 7602 if (rhs_val.compareWithZero(.eq)) { 7603 return sema.failWithDivideByZero(block, rhs_src); 7604 } 7605 } 7606 if (maybe_lhs_val) |lhs_val| { 7607 if (lhs_val.isUndef()) { 7608 if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { 7609 if (maybe_rhs_val) |rhs_val| { 7610 if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { 7611 return sema.addConstUndef(scalar_type); 7612 } 7613 } 7614 return sema.failWithUseOfUndef(block, rhs_src); 7615 } 7616 return sema.addConstUndef(scalar_type); 7617 } 7618 7619 if (maybe_rhs_val) |rhs_val| { 7620 if (is_int) { 7621 return sema.addConstant( 7622 scalar_type, 7623 try lhs_val.intDiv(rhs_val, sema.arena), 7624 ); 7625 } else { 7626 return sema.addConstant( 7627 scalar_type, 7628 try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena), 7629 ); 7630 } 7631 } else { 7632 if (is_int) { 7633 break :rs .{ .src = rhs_src, .air_tag = .div_trunc }; 7634 } else { 7635 break :rs .{ .src = rhs_src, .air_tag = .div_float }; 7636 } 7637 } 7638 } else { 7639 if (is_int) { 7640 break :rs .{ .src = lhs_src, .air_tag = .div_trunc }; 7641 } else { 7642 break :rs .{ .src = lhs_src, .air_tag = .div_float }; 7643 } 7644 } 7645 }, 7646 .div_trunc => { 7647 // For integers: 7648 // If the lhs is zero, then zero is returned regardless of rhs. 7649 // If the rhs is zero, compile error for division by zero. 7650 // If the rhs is undefined, compile error because there is a possible 7651 // value (zero) for which the division would be illegal behavior. 7652 // If the lhs is undefined: 7653 // * if lhs type is signed: 7654 // * if rhs is comptime-known and not -1, result is undefined 7655 // * if rhs is -1 or runtime-known, compile error because there is a 7656 // possible value (-min_int / -1) for which division would be 7657 // illegal behavior. 7658 // * if lhs type is unsigned, undef is returned regardless of rhs. 7659 // TODO: emit runtime safety for division by zero 7660 // 7661 // For floats: 7662 // If the rhs is zero, compile error for division by zero. 7663 // If the rhs is undefined, compile error because there is a possible 7664 // value (zero) for which the division would be illegal behavior. 7665 // If the lhs is undefined, result is undefined. 
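                // When both operands are comptime-known, integer division folds via
                // `Value.intDiv` (truncating division) and float division via
                // `Value.floatDivTrunc`.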
7666 if (maybe_lhs_val) |lhs_val| { 7667 if (!lhs_val.isUndef()) { 7668 if (lhs_val.compareWithZero(.eq)) { 7669 return sema.addConstant(scalar_type, Value.zero); 7670 } 7671 } 7672 } 7673 if (maybe_rhs_val) |rhs_val| { 7674 if (rhs_val.isUndef()) { 7675 return sema.failWithUseOfUndef(block, rhs_src); 7676 } 7677 if (rhs_val.compareWithZero(.eq)) { 7678 return sema.failWithDivideByZero(block, rhs_src); 7679 } 7680 } 7681 if (maybe_lhs_val) |lhs_val| { 7682 if (lhs_val.isUndef()) { 7683 if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { 7684 if (maybe_rhs_val) |rhs_val| { 7685 if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { 7686 return sema.addConstUndef(scalar_type); 7687 } 7688 } 7689 return sema.failWithUseOfUndef(block, rhs_src); 7690 } 7691 return sema.addConstUndef(scalar_type); 7692 } 7693 7694 if (maybe_rhs_val) |rhs_val| { 7695 if (is_int) { 7696 return sema.addConstant( 7697 scalar_type, 7698 try lhs_val.intDiv(rhs_val, sema.arena), 7699 ); 7700 } else { 7701 return sema.addConstant( 7702 scalar_type, 7703 try lhs_val.floatDivTrunc(rhs_val, scalar_type, sema.arena), 7704 ); 7705 } 7706 } else break :rs .{ .src = rhs_src, .air_tag = .div_trunc }; 7707 } else break :rs .{ .src = lhs_src, .air_tag = .div_trunc }; 7708 }, 7709 .div_floor => { 7710 // For integers: 7711 // If the lhs is zero, then zero is returned regardless of rhs. 7712 // If the rhs is zero, compile error for division by zero. 7713 // If the rhs is undefined, compile error because there is a possible 7714 // value (zero) for which the division would be illegal behavior. 7715 // If the lhs is undefined: 7716 // * if lhs type is signed: 7717 // * if rhs is comptime-known and not -1, result is undefined 7718 // * if rhs is -1 or runtime-known, compile error because there is a 7719 // possible value (-min_int / -1) for which division would be 7720 // illegal behavior. 7721 // * if lhs type is unsigned, undef is returned regardless of rhs. 7722 // TODO: emit runtime safety for division by zero 7723 // 7724 // For floats: 7725 // If the rhs is zero, compile error for division by zero. 7726 // If the rhs is undefined, compile error because there is a possible 7727 // value (zero) for which the division would be illegal behavior. 7728 // If the lhs is undefined, result is undefined. 
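                // Identical structure to `.div_trunc` above, except comptime folding
                // goes through `Value.intDivFloor` and `Value.floatDivFloor`.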
7729 if (maybe_lhs_val) |lhs_val| { 7730 if (!lhs_val.isUndef()) { 7731 if (lhs_val.compareWithZero(.eq)) { 7732 return sema.addConstant(scalar_type, Value.zero); 7733 } 7734 } 7735 } 7736 if (maybe_rhs_val) |rhs_val| { 7737 if (rhs_val.isUndef()) { 7738 return sema.failWithUseOfUndef(block, rhs_src); 7739 } 7740 if (rhs_val.compareWithZero(.eq)) { 7741 return sema.failWithDivideByZero(block, rhs_src); 7742 } 7743 } 7744 if (maybe_lhs_val) |lhs_val| { 7745 if (lhs_val.isUndef()) { 7746 if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { 7747 if (maybe_rhs_val) |rhs_val| { 7748 if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { 7749 return sema.addConstUndef(scalar_type); 7750 } 7751 } 7752 return sema.failWithUseOfUndef(block, rhs_src); 7753 } 7754 return sema.addConstUndef(scalar_type); 7755 } 7756 7757 if (maybe_rhs_val) |rhs_val| { 7758 if (is_int) { 7759 return sema.addConstant( 7760 scalar_type, 7761 try lhs_val.intDivFloor(rhs_val, sema.arena), 7762 ); 7763 } else { 7764 return sema.addConstant( 7765 scalar_type, 7766 try lhs_val.floatDivFloor(rhs_val, scalar_type, sema.arena), 7767 ); 7768 } 7769 } else break :rs .{ .src = rhs_src, .air_tag = .div_floor }; 7770 } else break :rs .{ .src = lhs_src, .air_tag = .div_floor }; 7771 }, 7772 .div_exact => { 7773 // For integers: 7774 // If the lhs is zero, then zero is returned regardless of rhs. 7775 // If the rhs is zero, compile error for division by zero. 7776 // If the rhs is undefined, compile error because there is a possible 7777 // value (zero) for which the division would be illegal behavior. 7778 // If the lhs is undefined, compile error because there is a possible 7779 // value for which the division would result in a remainder. 7780 // TODO: emit runtime safety for if there is a remainder 7781 // TODO: emit runtime safety for division by zero 7782 // 7783 // For floats: 7784 // If the rhs is zero, compile error for division by zero. 7785 // If the rhs is undefined, compile error because there is a possible 7786 // value (zero) for which the division would be illegal behavior. 7787 // If the lhs is undefined, compile error because there is a possible 7788 // value for which the division would result in a remainder. 7789 if (maybe_lhs_val) |lhs_val| { 7790 if (lhs_val.isUndef()) { 7791 return sema.failWithUseOfUndef(block, rhs_src); 7792 } else { 7793 if (lhs_val.compareWithZero(.eq)) { 7794 return sema.addConstant(scalar_type, Value.zero); 7795 } 7796 } 7797 } 7798 if (maybe_rhs_val) |rhs_val| { 7799 if (rhs_val.isUndef()) { 7800 return sema.failWithUseOfUndef(block, rhs_src); 7801 } 7802 if (rhs_val.compareWithZero(.eq)) { 7803 return sema.failWithDivideByZero(block, rhs_src); 7804 } 7805 } 7806 if (maybe_lhs_val) |lhs_val| { 7807 if (maybe_rhs_val) |rhs_val| { 7808 if (is_int) { 7809 // TODO: emit compile error if there is a remainder 7810 return sema.addConstant( 7811 scalar_type, 7812 try lhs_val.intDiv(rhs_val, sema.arena), 7813 ); 7814 } else { 7815 // TODO: emit compile error if there is a remainder 7816 return sema.addConstant( 7817 scalar_type, 7818 try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena), 7819 ); 7820 } 7821 } else break :rs .{ .src = rhs_src, .air_tag = .div_exact }; 7822 } else break :rs .{ .src = lhs_src, .air_tag = .div_exact }; 7823 }, 7824 .mul => { 7825 // For integers: 7826 // If either of the operands are zero, the result is zero. 7827 // If either of the operands are one, the result is the other 7828 // operand, even if it is undefined. 
                // If either of the operands are undefined, it's a compile error
                // because there is a possible value for which the multiplication
                // would overflow (max_int), causing illegal behavior.
                // For floats: either operand being undef makes the result undef.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef()) {
                        if (lhs_val.compareWithZero(.eq)) {
                            return sema.addConstant(scalar_type, Value.zero);
                        }
                        if (lhs_val.compare(.eq, Value.one, scalar_type)) {
                            return casted_rhs;
                        }
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef()) {
                        if (is_int) {
                            return sema.failWithUseOfUndef(block, rhs_src);
                        } else {
                            return sema.addConstUndef(scalar_type);
                        }
                    }
                    if (rhs_val.compareWithZero(.eq)) {
                        return sema.addConstant(scalar_type, Value.zero);
                    }
                    if (rhs_val.compare(.eq, Value.one, scalar_type)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef()) {
                            if (is_int) {
                                return sema.failWithUseOfUndef(block, lhs_src);
                            } else {
                                return sema.addConstUndef(scalar_type);
                            }
                        }
                        if (is_int) {
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.intMul(rhs_val, sema.arena),
                            );
                        } else {
                            return sema.addConstant(
                                scalar_type,
                                try lhs_val.floatMul(rhs_val, scalar_type, sema.arena),
                            );
                        }
                    } else break :rs .{ .src = lhs_src, .air_tag = .mul };
                } else break :rs .{ .src = rhs_src, .air_tag = .mul };
            },
            .mulwrap => {
                // Integers only; floats are handled above.
                // If either of the operands are zero, result is zero.
                // If either of the operands are one, result is the other operand.
                // If either of the operands are undefined, result is undefined.
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef()) {
                        if (lhs_val.compareWithZero(.eq)) {
                            return sema.addConstant(scalar_type, Value.zero);
                        }
                        if (lhs_val.compare(.eq, Value.one, scalar_type)) {
                            return casted_rhs;
                        }
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef()) {
                        return sema.addConstUndef(scalar_type);
                    }
                    if (rhs_val.compareWithZero(.eq)) {
                        return sema.addConstant(scalar_type, Value.zero);
                    }
                    if (rhs_val.compare(.eq, Value.one, scalar_type)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef()) {
                            return sema.addConstUndef(scalar_type);
                        }
                        return sema.addConstant(
                            scalar_type,
                            try lhs_val.numberMulWrap(rhs_val, scalar_type, sema.arena, target),
                        );
                    } else break :rs .{ .src = lhs_src, .air_tag = .mulwrap };
                } else break :rs .{ .src = rhs_src, .air_tag = .mulwrap };
            },
            .mul_sat => {
                // Integers only; floats are checked above.
                // If either of the operands are zero, result is zero.
                // If either of the operands are one, result is the other operand.
                // If either of the operands are undefined, result is undefined.
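                // Added illustrative note: `mul_sat` is the `*|` saturating
                // multiply, so the comptime fold below clamps to the integer
                // bounds instead of overflowing. For example:
                //
                //     @as(u8, 200) *| 2 == 255    // maxInt(u8)
                //     @as(i8, -100) *| 3 == -128  // minInt(i8)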
7920 if (maybe_lhs_val) |lhs_val| { 7921 if (!lhs_val.isUndef()) { 7922 if (lhs_val.compareWithZero(.eq)) { 7923 return sema.addConstant(scalar_type, Value.zero); 7924 } 7925 if (lhs_val.compare(.eq, Value.one, scalar_type)) { 7926 return casted_rhs; 7927 } 7928 } 7929 } 7930 if (maybe_rhs_val) |rhs_val| { 7931 if (rhs_val.isUndef()) { 7932 return sema.addConstUndef(scalar_type); 7933 } 7934 if (rhs_val.compareWithZero(.eq)) { 7935 return sema.addConstant(scalar_type, Value.zero); 7936 } 7937 if (rhs_val.compare(.eq, Value.one, scalar_type)) { 7938 return casted_lhs; 7939 } 7940 if (maybe_lhs_val) |lhs_val| { 7941 if (lhs_val.isUndef()) { 7942 return sema.addConstUndef(scalar_type); 7943 } 7944 return sema.addConstant( 7945 scalar_type, 7946 try lhs_val.intMulSat(rhs_val, scalar_type, sema.arena, target), 7947 ); 7948 } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat }; 7949 } else break :rs .{ .src = rhs_src, .air_tag = .mul_sat }; 7950 }, 7951 .mod_rem => { 7952 // For integers: 7953 // Either operand being undef is a compile error because there exists 7954 // a possible value (TODO what is it?) that would invoke illegal behavior. 7955 // TODO: can lhs zero be handled better? 7956 // TODO: can lhs undef be handled better? 7957 // 7958 // For floats: 7959 // If the rhs is zero, compile error for division by zero. 7960 // If the rhs is undefined, compile error because there is a possible 7961 // value (zero) for which the division would be illegal behavior. 7962 // If the lhs is undefined, result is undefined. 7963 // 7964 // For either one: if the result would be different between @mod and @rem, 7965 // then emit a compile error saying you have to pick one. 7966 if (is_int) { 7967 if (maybe_lhs_val) |lhs_val| { 7968 if (lhs_val.isUndef()) { 7969 return sema.failWithUseOfUndef(block, lhs_src); 7970 } 7971 if (lhs_val.compareWithZero(.lt)) { 7972 return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); 7973 } 7974 } else if (lhs_ty.isSignedInt()) { 7975 return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); 7976 } 7977 if (maybe_rhs_val) |rhs_val| { 7978 if (rhs_val.isUndef()) { 7979 return sema.failWithUseOfUndef(block, rhs_src); 7980 } 7981 if (rhs_val.compareWithZero(.eq)) { 7982 return sema.failWithDivideByZero(block, rhs_src); 7983 } 7984 if (rhs_val.compareWithZero(.lt)) { 7985 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); 7986 } 7987 if (maybe_lhs_val) |lhs_val| { 7988 return sema.addConstant( 7989 scalar_type, 7990 try lhs_val.intRem(rhs_val, sema.arena), 7991 ); 7992 } 7993 break :rs .{ .src = lhs_src, .air_tag = .rem }; 7994 } else if (rhs_ty.isSignedInt()) { 7995 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); 7996 } else { 7997 break :rs .{ .src = rhs_src, .air_tag = .rem }; 7998 } 7999 } 8000 // float operands 8001 if (maybe_rhs_val) |rhs_val| { 8002 if (rhs_val.isUndef()) { 8003 return sema.failWithUseOfUndef(block, rhs_src); 8004 } 8005 if (rhs_val.compareWithZero(.eq)) { 8006 return sema.failWithDivideByZero(block, rhs_src); 8007 } 8008 if (rhs_val.compareWithZero(.lt)) { 8009 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); 8010 } 8011 if (maybe_lhs_val) |lhs_val| { 8012 if (lhs_val.isUndef() or lhs_val.compareWithZero(.lt)) { 8013 return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); 8014 } 8015 return sema.addConstant( 8016 scalar_type, 8017 try lhs_val.floatRem(rhs_val, sema.arena), 8018 ); 8019 } else { 8020 return sema.failWithModRemNegative(block, lhs_src, lhs_ty, 
rhs_ty); 8021 } 8022 } else { 8023 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); 8024 } 8025 }, 8026 .rem => { 8027 // For integers: 8028 // Either operand being undef is a compile error because there exists 8029 // a possible value (TODO what is it?) that would invoke illegal behavior. 8030 // TODO: can lhs zero be handled better? 8031 // TODO: can lhs undef be handled better? 8032 // 8033 // For floats: 8034 // If the rhs is zero, compile error for division by zero. 8035 // If the rhs is undefined, compile error because there is a possible 8036 // value (zero) for which the division would be illegal behavior. 8037 // If the lhs is undefined, result is undefined. 8038 if (is_int) { 8039 if (maybe_lhs_val) |lhs_val| { 8040 if (lhs_val.isUndef()) { 8041 return sema.failWithUseOfUndef(block, lhs_src); 8042 } 8043 } 8044 if (maybe_rhs_val) |rhs_val| { 8045 if (rhs_val.isUndef()) { 8046 return sema.failWithUseOfUndef(block, rhs_src); 8047 } 8048 if (rhs_val.compareWithZero(.eq)) { 8049 return sema.failWithDivideByZero(block, rhs_src); 8050 } 8051 if (maybe_lhs_val) |lhs_val| { 8052 return sema.addConstant( 8053 scalar_type, 8054 try lhs_val.intRem(rhs_val, sema.arena), 8055 ); 8056 } 8057 break :rs .{ .src = lhs_src, .air_tag = .rem }; 8058 } else { 8059 break :rs .{ .src = rhs_src, .air_tag = .rem }; 8060 } 8061 } 8062 // float operands 8063 if (maybe_rhs_val) |rhs_val| { 8064 if (rhs_val.isUndef()) { 8065 return sema.failWithUseOfUndef(block, rhs_src); 8066 } 8067 if (rhs_val.compareWithZero(.eq)) { 8068 return sema.failWithDivideByZero(block, rhs_src); 8069 } 8070 } 8071 if (maybe_lhs_val) |lhs_val| { 8072 if (lhs_val.isUndef()) { 8073 return sema.addConstUndef(scalar_type); 8074 } 8075 if (maybe_rhs_val) |rhs_val| { 8076 return sema.addConstant( 8077 scalar_type, 8078 try lhs_val.floatRem(rhs_val, sema.arena), 8079 ); 8080 } else break :rs .{ .src = rhs_src, .air_tag = .rem }; 8081 } else break :rs .{ .src = lhs_src, .air_tag = .rem }; 8082 }, 8083 .mod => { 8084 // For integers: 8085 // Either operand being undef is a compile error because there exists 8086 // a possible value (TODO what is it?) that would invoke illegal behavior. 8087 // TODO: can lhs zero be handled better? 8088 // TODO: can lhs undef be handled better? 8089 // 8090 // For floats: 8091 // If the rhs is zero, compile error for division by zero. 8092 // If the rhs is undefined, compile error because there is a possible 8093 // value (zero) for which the division would be illegal behavior. 8094 // If the lhs is undefined, result is undefined. 
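                // Added illustrative note: `@mod` takes the sign of the divisor
                // (rhs), while `@rem` above takes the sign of the dividend (lhs).
                // For example, with i32 operands:
                //
                //     @mod(-5, 3) == 1    // sign follows the divisor
                //     @rem(-5, 3) == -2   // sign follows the dividend
                //
                // This disagreement is why `mod_rem` (the `%` operator) above
                // rejects operands that could be negative.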
8095 if (is_int) { 8096 if (maybe_lhs_val) |lhs_val| { 8097 if (lhs_val.isUndef()) { 8098 return sema.failWithUseOfUndef(block, lhs_src); 8099 } 8100 } 8101 if (maybe_rhs_val) |rhs_val| { 8102 if (rhs_val.isUndef()) { 8103 return sema.failWithUseOfUndef(block, rhs_src); 8104 } 8105 if (rhs_val.compareWithZero(.eq)) { 8106 return sema.failWithDivideByZero(block, rhs_src); 8107 } 8108 if (maybe_lhs_val) |lhs_val| { 8109 return sema.addConstant( 8110 scalar_type, 8111 try lhs_val.intMod(rhs_val, sema.arena), 8112 ); 8113 } 8114 break :rs .{ .src = lhs_src, .air_tag = .mod }; 8115 } else { 8116 break :rs .{ .src = rhs_src, .air_tag = .mod }; 8117 } 8118 } 8119 // float operands 8120 if (maybe_rhs_val) |rhs_val| { 8121 if (rhs_val.isUndef()) { 8122 return sema.failWithUseOfUndef(block, rhs_src); 8123 } 8124 if (rhs_val.compareWithZero(.eq)) { 8125 return sema.failWithDivideByZero(block, rhs_src); 8126 } 8127 } 8128 if (maybe_lhs_val) |lhs_val| { 8129 if (lhs_val.isUndef()) { 8130 return sema.addConstUndef(scalar_type); 8131 } 8132 if (maybe_rhs_val) |rhs_val| { 8133 return sema.addConstant( 8134 scalar_type, 8135 try lhs_val.floatMod(rhs_val, sema.arena), 8136 ); 8137 } else break :rs .{ .src = rhs_src, .air_tag = .mod }; 8138 } else break :rs .{ .src = lhs_src, .air_tag = .mod }; 8139 }, 8140 else => unreachable, 8141 } 8142 }; 8143 8144 try sema.requireRuntimeBlock(block, rs.src); 8145 return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs); 8146} 8147 8148fn analyzePtrArithmetic( 8149 sema: *Sema, 8150 block: *Block, 8151 op_src: LazySrcLoc, 8152 ptr: Air.Inst.Ref, 8153 uncasted_offset: Air.Inst.Ref, 8154 air_tag: Air.Inst.Tag, 8155 ptr_src: LazySrcLoc, 8156 offset_src: LazySrcLoc, 8157) CompileError!Air.Inst.Ref { 8158 // TODO if the operand is comptime-known to be negative, or is a negative int, 8159 // coerce to isize instead of usize. 
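    // Added illustrative note: when both the pointer and the offset are
    // comptime-known with an integer address, the fold below computes the new
    // address directly. For example, a `[*]u32` known to be at address 0x1000
    // advanced by 3 elements folds to a constant pointer at
    // 0x1000 + 3 * @sizeOf(u32) (0x100C with a 4-byte u32). A single pointer
    // to an array steps by the size of the array's element type instead.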
8160 const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src); 8161 // TODO adjust the return type according to alignment and other factors 8162 const runtime_src = rs: { 8163 if (try sema.resolveMaybeUndefVal(block, ptr_src, ptr)) |ptr_val| { 8164 if (try sema.resolveMaybeUndefVal(block, offset_src, offset)) |offset_val| { 8165 const ptr_ty = sema.typeOf(ptr); 8166 const new_ptr_ty = ptr_ty; // TODO modify alignment 8167 8168 if (ptr_val.isUndef() or offset_val.isUndef()) { 8169 return sema.addConstUndef(new_ptr_ty); 8170 } 8171 8172 const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt()); 8173 if (ptr_val.getUnsignedInt()) |addr| { 8174 const target = sema.mod.getTarget(); 8175 const ptr_child_ty = ptr_ty.childType(); 8176 const elem_ty = if (ptr_ty.isSinglePointer() and ptr_child_ty.zigTypeTag() == .Array) 8177 ptr_child_ty.childType() 8178 else 8179 ptr_child_ty; 8180 8181 const elem_size = elem_ty.abiSize(target); 8182 const new_addr = switch (air_tag) { 8183 .ptr_add => addr + elem_size * offset_int, 8184 .ptr_sub => addr - elem_size * offset_int, 8185 else => unreachable, 8186 }; 8187 const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr); 8188 return sema.addConstant(new_ptr_ty, new_ptr_val); 8189 } 8190 if (air_tag == .ptr_sub) { 8191 return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); 8192 } 8193 const new_ptr_val = try ptr_val.elemPtr(sema.arena, offset_int); 8194 return sema.addConstant(new_ptr_ty, new_ptr_val); 8195 } else break :rs offset_src; 8196 } else break :rs ptr_src; 8197 }; 8198 8199 try sema.requireRuntimeBlock(block, runtime_src); 8200 return block.addBinOp(air_tag, ptr, offset); 8201} 8202 8203fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 8204 const tracy = trace(@src()); 8205 defer tracy.end(); 8206 8207 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 8208 const src = inst_data.src(); 8209 const ptr_src: LazySrcLoc = .{ .node_offset_deref_ptr = inst_data.src_node }; 8210 const ptr = sema.resolveInst(inst_data.operand); 8211 return sema.analyzeLoad(block, src, ptr, ptr_src); 8212} 8213 8214fn zirAsm( 8215 sema: *Sema, 8216 block: *Block, 8217 extended: Zir.Inst.Extended.InstData, 8218 inst: Zir.Inst.Index, 8219) CompileError!Air.Inst.Ref { 8220 const tracy = trace(@src()); 8221 defer tracy.end(); 8222 8223 const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand); 8224 const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; 8225 const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node }; 8226 const outputs_len = @truncate(u5, extended.small); 8227 const inputs_len = @truncate(u5, extended.small >> 5); 8228 const clobbers_len = @truncate(u5, extended.small >> 10); 8229 8230 if (extra.data.asm_source == 0) { 8231 // This can move to become an AstGen error after inline assembly improvements land 8232 // and stage1 code matches stage2 code. 
8233 return sema.fail(block, src, "assembly code must use string literal syntax", .{}); 8234 } 8235 8236 if (outputs_len > 1) { 8237 return sema.fail(block, src, "TODO implement Sema for asm with more than 1 output", .{}); 8238 } 8239 8240 var extra_i = extra.end; 8241 var output_type_bits = extra.data.output_type_bits; 8242 8243 const Output = struct { constraint: []const u8, ty: Type }; 8244 const output: ?Output = if (outputs_len == 0) null else blk: { 8245 const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i); 8246 extra_i = output.end; 8247 8248 const is_type = @truncate(u1, output_type_bits) != 0; 8249 output_type_bits >>= 1; 8250 8251 if (!is_type) { 8252 return sema.fail(block, src, "TODO implement Sema for asm with non `->` output", .{}); 8253 } 8254 8255 const constraint = sema.code.nullTerminatedString(output.data.constraint); 8256 break :blk Output{ 8257 .constraint = constraint, 8258 .ty = try sema.resolveType(block, ret_ty_src, output.data.operand), 8259 }; 8260 }; 8261 8262 const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len); 8263 const inputs = try sema.arena.alloc([]const u8, inputs_len); 8264 8265 for (args) |*arg, arg_i| { 8266 const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i); 8267 extra_i = input.end; 8268 8269 const name = sema.code.nullTerminatedString(input.data.name); 8270 _ = name; // TODO: use the name 8271 8272 arg.* = sema.resolveInst(input.data.operand); 8273 inputs[arg_i] = sema.code.nullTerminatedString(input.data.constraint); 8274 } 8275 8276 const clobbers = try sema.arena.alloc([]const u8, clobbers_len); 8277 for (clobbers) |*name| { 8278 name.* = sema.code.nullTerminatedString(sema.code.extra[extra_i]); 8279 extra_i += 1; 8280 } 8281 8282 try sema.requireRuntimeBlock(block, src); 8283 const gpa = sema.gpa; 8284 try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Asm).Struct.fields.len + args.len); 8285 const asm_air = try block.addInst(.{ 8286 .tag = .assembly, 8287 .data = .{ .ty_pl = .{ 8288 .ty = if (output) |o| try sema.addType(o.ty) else Air.Inst.Ref.void_type, 8289 .payload = sema.addExtraAssumeCapacity(Air.Asm{ 8290 .zir_index = inst, 8291 }), 8292 } }, 8293 }); 8294 sema.appendRefsAssumeCapacity(args); 8295 return asm_air; 8296} 8297 8298/// Only called for equality operators. See also `zirCmp`. 
8299fn zirCmpEq( 8300 sema: *Sema, 8301 block: *Block, 8302 inst: Zir.Inst.Index, 8303 op: std.math.CompareOperator, 8304 air_tag: Air.Inst.Tag, 8305) CompileError!Air.Inst.Ref { 8306 const tracy = trace(@src()); 8307 defer tracy.end(); 8308 8309 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 8310 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 8311 const src: LazySrcLoc = inst_data.src(); 8312 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 8313 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 8314 const lhs = sema.resolveInst(extra.lhs); 8315 const rhs = sema.resolveInst(extra.rhs); 8316 8317 const lhs_ty = sema.typeOf(lhs); 8318 const rhs_ty = sema.typeOf(rhs); 8319 const lhs_ty_tag = lhs_ty.zigTypeTag(); 8320 const rhs_ty_tag = rhs_ty.zigTypeTag(); 8321 if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) { 8322 // null == null, null != null 8323 if (op == .eq) { 8324 return Air.Inst.Ref.bool_true; 8325 } else { 8326 return Air.Inst.Ref.bool_false; 8327 } 8328 } 8329 if (((lhs_ty_tag == .Null and rhs_ty_tag == .Optional) or 8330 rhs_ty_tag == .Null and lhs_ty_tag == .Optional)) 8331 { 8332 // comparing null with optionals 8333 const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs; 8334 return sema.analyzeIsNull(block, src, opt_operand, op == .neq); 8335 } 8336 if (((lhs_ty_tag == .Null and rhs_ty.isCPtr()) or (rhs_ty_tag == .Null and lhs_ty.isCPtr()))) { 8337 // comparing null with C pointers 8338 const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs; 8339 return sema.analyzeIsNull(block, src, opt_operand, op == .neq); 8340 } 8341 if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { 8342 const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; 8343 return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type}); 8344 } 8345 if (lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) { 8346 return sema.analyzeCmpUnionTag(block, rhs, rhs_src, lhs, lhs_src, op); 8347 } 8348 if (rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union) { 8349 return sema.analyzeCmpUnionTag(block, lhs, lhs_src, rhs, rhs_src, op); 8350 } 8351 if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) { 8352 const runtime_src: LazySrcLoc = src: { 8353 if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lval| { 8354 if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rval| { 8355 if (lval.isUndef() or rval.isUndef()) { 8356 return sema.addConstUndef(Type.initTag(.bool)); 8357 } 8358 // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, 8359 // or calling to Module.getErrorValue to get the values and then compare them is 8360 // faster. 
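                    // Added illustrative note: when both sides are comptime-known,
                    // e.g. `error.NotFound == error.NotFound`, the name comparison
                    // below decides the result and no runtime instruction is
                    // emitted; a mismatch such as
                    // `error.NotFound == error.OutOfMemory` folds to `bool_false`.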
8361 const lhs_name = lval.castTag(.@"error").?.data.name; 8362 const rhs_name = rval.castTag(.@"error").?.data.name; 8363 if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) { 8364 return Air.Inst.Ref.bool_true; 8365 } else { 8366 return Air.Inst.Ref.bool_false; 8367 } 8368 } else { 8369 break :src rhs_src; 8370 } 8371 } else { 8372 break :src lhs_src; 8373 } 8374 }; 8375 try sema.requireRuntimeBlock(block, runtime_src); 8376 return block.addBinOp(air_tag, lhs, rhs); 8377 } 8378 if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) { 8379 const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs); 8380 const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs); 8381 if (lhs_as_type.eql(rhs_as_type) == (op == .eq)) { 8382 return Air.Inst.Ref.bool_true; 8383 } else { 8384 return Air.Inst.Ref.bool_false; 8385 } 8386 } 8387 return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true); 8388} 8389 8390fn analyzeCmpUnionTag( 8391 sema: *Sema, 8392 block: *Block, 8393 un: Air.Inst.Ref, 8394 un_src: LazySrcLoc, 8395 tag: Air.Inst.Ref, 8396 tag_src: LazySrcLoc, 8397 op: std.math.CompareOperator, 8398) CompileError!Air.Inst.Ref { 8399 const union_ty = try sema.resolveTypeFields(block, un_src, sema.typeOf(un)); 8400 const union_tag_ty = union_ty.unionTagType() orelse { 8401 // TODO note at declaration site that says "union foo is not tagged" 8402 return sema.fail(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); 8403 }; 8404 // Coerce both the union and the tag to the union's tag type, and then execute the 8405 // enum comparison codepath. 8406 const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src); 8407 const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); 8408 8409 return sema.cmpSelf(block, coerced_union, coerced_tag, op, un_src, tag_src); 8410} 8411 8412/// Only called for non-equality operators. See also `zirCmpEq`. 8413fn zirCmp( 8414 sema: *Sema, 8415 block: *Block, 8416 inst: Zir.Inst.Index, 8417 op: std.math.CompareOperator, 8418) CompileError!Air.Inst.Ref { 8419 const tracy = trace(@src()); 8420 defer tracy.end(); 8421 8422 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 8423 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 8424 const src: LazySrcLoc = inst_data.src(); 8425 const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; 8426 const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; 8427 const lhs = sema.resolveInst(extra.lhs); 8428 const rhs = sema.resolveInst(extra.rhs); 8429 return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, false); 8430} 8431 8432fn analyzeCmp( 8433 sema: *Sema, 8434 block: *Block, 8435 src: LazySrcLoc, 8436 lhs: Air.Inst.Ref, 8437 rhs: Air.Inst.Ref, 8438 op: std.math.CompareOperator, 8439 lhs_src: LazySrcLoc, 8440 rhs_src: LazySrcLoc, 8441 is_equality_cmp: bool, 8442) CompileError!Air.Inst.Ref { 8443 const lhs_ty = sema.typeOf(lhs); 8444 const rhs_ty = sema.typeOf(rhs); 8445 if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) { 8446 // This operation allows any combination of integer and float types, regardless of the 8447 // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for 8448 // numeric types. 
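        // Added illustrative note: this is what makes e.g.
        // `@as(u8, 200) > @as(i8, -1)` a valid (and comptime-true) comparison
        // even though neither operand type coerces to the other; cmpNumeric
        // compares the numeric values directly.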
8449 return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src); 8450 } 8451 const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; 8452 const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } }); 8453 if (!resolved_type.isSelfComparable(is_equality_cmp)) { 8454 return sema.fail(block, src, "{s} operator not allowed for type '{}'", .{ 8455 @tagName(op), resolved_type, 8456 }); 8457 } 8458 const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); 8459 const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); 8460 return sema.cmpSelf(block, casted_lhs, casted_rhs, op, lhs_src, rhs_src); 8461} 8462 8463fn cmpSelf( 8464 sema: *Sema, 8465 block: *Block, 8466 casted_lhs: Air.Inst.Ref, 8467 casted_rhs: Air.Inst.Ref, 8468 op: std.math.CompareOperator, 8469 lhs_src: LazySrcLoc, 8470 rhs_src: LazySrcLoc, 8471) CompileError!Air.Inst.Ref { 8472 const resolved_type = sema.typeOf(casted_lhs); 8473 const runtime_src: LazySrcLoc = src: { 8474 if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { 8475 if (lhs_val.isUndef()) return sema.addConstUndef(resolved_type); 8476 if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { 8477 if (rhs_val.isUndef()) return sema.addConstUndef(resolved_type); 8478 8479 if (lhs_val.compare(op, rhs_val, resolved_type)) { 8480 return Air.Inst.Ref.bool_true; 8481 } else { 8482 return Air.Inst.Ref.bool_false; 8483 } 8484 } else { 8485 if (resolved_type.zigTypeTag() == .Bool) { 8486 // We can lower bool eq/neq more efficiently. 8487 return sema.runtimeBoolCmp(block, op, casted_rhs, lhs_val.toBool(), rhs_src); 8488 } 8489 break :src rhs_src; 8490 } 8491 } else { 8492 // For bools, we still check the other operand, because we can lower 8493 // bool eq/neq more efficiently. 
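            // Added illustrative note: with a runtime lhs and a comptime-known
            // rhs, `x == true` lowers to just `x`, and `x == false` lowers to a
            // single `not` instruction (see `runtimeBoolCmp` below) rather than
            // a `cmp_eq`.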
8494 if (resolved_type.zigTypeTag() == .Bool) { 8495 if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { 8496 if (rhs_val.isUndef()) return sema.addConstUndef(resolved_type); 8497 return sema.runtimeBoolCmp(block, op, casted_lhs, rhs_val.toBool(), lhs_src); 8498 } 8499 } 8500 break :src lhs_src; 8501 } 8502 }; 8503 try sema.requireRuntimeBlock(block, runtime_src); 8504 8505 const tag: Air.Inst.Tag = switch (op) { 8506 .lt => .cmp_lt, 8507 .lte => .cmp_lte, 8508 .eq => .cmp_eq, 8509 .gte => .cmp_gte, 8510 .gt => .cmp_gt, 8511 .neq => .cmp_neq, 8512 }; 8513 // TODO handle vectors 8514 return block.addBinOp(tag, casted_lhs, casted_rhs); 8515} 8516 8517/// cmp_eq (x, false) => not(x) 8518/// cmp_eq (x, true ) => x 8519/// cmp_neq(x, false) => x 8520/// cmp_neq(x, true ) => not(x) 8521fn runtimeBoolCmp( 8522 sema: *Sema, 8523 block: *Block, 8524 op: std.math.CompareOperator, 8525 lhs: Air.Inst.Ref, 8526 rhs: bool, 8527 runtime_src: LazySrcLoc, 8528) CompileError!Air.Inst.Ref { 8529 if ((op == .neq) == rhs) { 8530 try sema.requireRuntimeBlock(block, runtime_src); 8531 return block.addTyOp(.not, Type.initTag(.bool), lhs); 8532 } else { 8533 return lhs; 8534 } 8535} 8536 8537fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 8538 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 8539 const src = inst_data.src(); 8540 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 8541 const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); 8542 try sema.resolveTypeLayout(block, src, operand_ty); 8543 const target = sema.mod.getTarget(); 8544 const abi_size = switch (operand_ty.zigTypeTag()) { 8545 .Fn => unreachable, 8546 .NoReturn, 8547 .Undefined, 8548 .Null, 8549 .BoundFn, 8550 .Opaque, 8551 => return sema.fail(block, src, "no size available for type '{}'", .{operand_ty}), 8552 .Type, 8553 .EnumLiteral, 8554 .ComptimeFloat, 8555 .ComptimeInt, 8556 .Void, 8557 => 0, 8558 8559 .Bool, 8560 .Int, 8561 .Float, 8562 .Pointer, 8563 .Array, 8564 .Struct, 8565 .Optional, 8566 .ErrorUnion, 8567 .ErrorSet, 8568 .Enum, 8569 .Union, 8570 .Vector, 8571 .Frame, 8572 .AnyFrame, 8573 => operand_ty.abiSize(target), 8574 }; 8575 return sema.addIntUnsigned(Type.initTag(.comptime_int), abi_size); 8576} 8577 8578fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 8579 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 8580 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 8581 const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); 8582 const target = sema.mod.getTarget(); 8583 const bit_size = operand_ty.bitSize(target); 8584 return sema.addIntUnsigned(Type.initTag(.comptime_int), bit_size); 8585} 8586 8587fn zirThis( 8588 sema: *Sema, 8589 block: *Block, 8590 extended: Zir.Inst.Extended.InstData, 8591) CompileError!Air.Inst.Ref { 8592 const this_decl = block.namespace.getDecl(); 8593 const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; 8594 return sema.analyzeDeclVal(block, src, this_decl); 8595} 8596 8597fn zirClosureCapture( 8598 sema: *Sema, 8599 block: *Block, 8600 inst: Zir.Inst.Index, 8601) CompileError!void { 8602 // TODO: Compile error when closed over values are modified 8603 const inst_data = sema.code.instructions.items(.data)[inst].un_tok; 8604 const tv = try sema.resolveInstConst(block, inst_data.src(), inst_data.operand); 
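    // Added clarifying note: the resolved comptime value is copied into the
    // owner decl's arena (so it outlives this Sema) and stored in the capture
    // scope keyed by this ZIR instruction; `zirClosureGet` below walks the
    // scope chain at the use site to retrieve it.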
8605 try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{ 8606 .ty = try tv.ty.copy(sema.perm_arena), 8607 .val = try tv.val.copy(sema.perm_arena), 8608 }); 8609} 8610 8611fn zirClosureGet( 8612 sema: *Sema, 8613 block: *Block, 8614 inst: Zir.Inst.Index, 8615) CompileError!Air.Inst.Ref { 8616 // TODO CLOSURE: Test this with inline functions 8617 const inst_data = sema.code.instructions.items(.data)[inst].inst_node; 8618 var scope: *CaptureScope = block.src_decl.src_scope.?; 8619 // Note: The target closure must be in this scope list. 8620 // If it's not here, the zir is invalid, or the list is broken. 8621 const tv = while (true) { 8622 // Note: We don't need to add a dependency here, because 8623 // decls always depend on their lexical parents. 8624 if (scope.captures.getPtr(inst_data.inst)) |tv| { 8625 break tv; 8626 } 8627 scope = scope.parent.?; 8628 } else unreachable; 8629 8630 return sema.addConstant(tv.ty, tv.val); 8631} 8632 8633fn zirRetAddr( 8634 sema: *Sema, 8635 block: *Block, 8636 extended: Zir.Inst.Extended.InstData, 8637) CompileError!Air.Inst.Ref { 8638 const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; 8639 return sema.fail(block, src, "TODO: implement Sema.zirRetAddr", .{}); 8640} 8641 8642fn zirBuiltinSrc( 8643 sema: *Sema, 8644 block: *Block, 8645 extended: Zir.Inst.Extended.InstData, 8646) CompileError!Air.Inst.Ref { 8647 const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; 8648 return sema.fail(block, src, "TODO: implement Sema.zirBuiltinSrc", .{}); 8649} 8650 8651fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 8652 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 8653 const src = inst_data.src(); 8654 const ty = try sema.resolveType(block, src, inst_data.operand); 8655 const type_info_ty = try sema.getBuiltinType(block, src, "TypeInfo"); 8656 const target = sema.mod.getTarget(); 8657 8658 switch (ty.zigTypeTag()) { 8659 .Type => return sema.addConstant( 8660 type_info_ty, 8661 try Value.Tag.@"union".create(sema.arena, .{ 8662 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Type)), 8663 .val = Value.initTag(.unreachable_value), 8664 }), 8665 ), 8666 .Void => return sema.addConstant( 8667 type_info_ty, 8668 try Value.Tag.@"union".create(sema.arena, .{ 8669 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Void)), 8670 .val = Value.initTag(.unreachable_value), 8671 }), 8672 ), 8673 .Bool => return sema.addConstant( 8674 type_info_ty, 8675 try Value.Tag.@"union".create(sema.arena, .{ 8676 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Bool)), 8677 .val = Value.initTag(.unreachable_value), 8678 }), 8679 ), 8680 .NoReturn => return sema.addConstant( 8681 type_info_ty, 8682 try Value.Tag.@"union".create(sema.arena, .{ 8683 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.NoReturn)), 8684 .val = Value.initTag(.unreachable_value), 8685 }), 8686 ), 8687 .ComptimeFloat => return sema.addConstant( 8688 type_info_ty, 8689 try Value.Tag.@"union".create(sema.arena, .{ 8690 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeFloat)), 8691 .val = Value.initTag(.unreachable_value), 8692 }), 8693 ), 8694 .ComptimeInt => return sema.addConstant( 8695 type_info_ty, 8696 try Value.Tag.@"union".create(sema.arena, .{ 8697 .tag = try 
Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeInt)), 8698 .val = Value.initTag(.unreachable_value), 8699 }), 8700 ), 8701 .Undefined => return sema.addConstant( 8702 type_info_ty, 8703 try Value.Tag.@"union".create(sema.arena, .{ 8704 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Undefined)), 8705 .val = Value.initTag(.unreachable_value), 8706 }), 8707 ), 8708 .Null => return sema.addConstant( 8709 type_info_ty, 8710 try Value.Tag.@"union".create(sema.arena, .{ 8711 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Null)), 8712 .val = Value.initTag(.unreachable_value), 8713 }), 8714 ), 8715 .EnumLiteral => return sema.addConstant( 8716 type_info_ty, 8717 try Value.Tag.@"union".create(sema.arena, .{ 8718 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.EnumLiteral)), 8719 .val = Value.initTag(.unreachable_value), 8720 }), 8721 ), 8722 .Fn => { 8723 const info = ty.fnInfo(); 8724 const field_values = try sema.arena.alloc(Value, 6); 8725 // calling_convention: CallingConvention, 8726 field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)); 8727 // alignment: comptime_int, 8728 field_values[1] = try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target)); 8729 // is_generic: bool, 8730 field_values[2] = if (info.is_generic) Value.initTag(.bool_true) else Value.initTag(.bool_false); 8731 // is_var_args: bool, 8732 field_values[3] = if (info.is_var_args) Value.initTag(.bool_true) else Value.initTag(.bool_false); 8733 // return_type: ?type, 8734 field_values[4] = try Value.Tag.ty.create(sema.arena, ty.fnReturnType()); 8735 // args: []const FnArg, 8736 field_values[5] = Value.@"null"; // TODO 8737 8738 return sema.addConstant( 8739 type_info_ty, 8740 try Value.Tag.@"union".create(sema.arena, .{ 8741 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Fn)), 8742 .val = try Value.Tag.@"struct".create(sema.arena, field_values), 8743 }), 8744 ); 8745 }, 8746 .Int => { 8747 const info = ty.intInfo(target); 8748 const field_values = try sema.arena.alloc(Value, 2); 8749 // signedness: Signedness, 8750 field_values[0] = try Value.Tag.enum_field_index.create( 8751 sema.arena, 8752 @enumToInt(info.signedness), 8753 ); 8754 // bits: comptime_int, 8755 field_values[1] = try Value.Tag.int_u64.create(sema.arena, info.bits); 8756 8757 return sema.addConstant( 8758 type_info_ty, 8759 try Value.Tag.@"union".create(sema.arena, .{ 8760 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Int)), 8761 .val = try Value.Tag.@"struct".create(sema.arena, field_values), 8762 }), 8763 ); 8764 }, 8765 .Float => { 8766 const field_values = try sema.arena.alloc(Value, 1); 8767 // bits: comptime_int, 8768 field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target)); 8769 8770 return sema.addConstant( 8771 type_info_ty, 8772 try Value.Tag.@"union".create(sema.arena, .{ 8773 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Float)), 8774 .val = try Value.Tag.@"struct".create(sema.arena, field_values), 8775 }), 8776 ); 8777 }, 8778 .Pointer => { 8779 const info = ty.ptrInfo().data; 8780 const field_values = try sema.arena.alloc(Value, 7); 8781 // size: Size, 8782 field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size)); 8783 // is_const: bool, 8784 field_values[1] = if (!info.mutable) 
Value.initTag(.bool_true) else Value.initTag(.bool_false); 8785 // is_volatile: bool, 8786 field_values[2] = if (info.@"volatile") Value.initTag(.bool_true) else Value.initTag(.bool_false); 8787 // alignment: comptime_int, 8788 field_values[3] = try Value.Tag.int_u64.create(sema.arena, info.@"align"); 8789 // child: type, 8790 field_values[4] = try Value.Tag.ty.create(sema.arena, info.pointee_type); 8791 // is_allowzero: bool, 8792 field_values[5] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false); 8793 // sentinel: anytype, 8794 field_values[6] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null"; 8795 8796 return sema.addConstant( 8797 type_info_ty, 8798 try Value.Tag.@"union".create(sema.arena, .{ 8799 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Pointer)), 8800 .val = try Value.Tag.@"struct".create(sema.arena, field_values), 8801 }), 8802 ); 8803 }, 8804 .Array => { 8805 const info = ty.arrayInfo(); 8806 const field_values = try sema.arena.alloc(Value, 3); 8807 // len: comptime_int, 8808 field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); 8809 // child: type, 8810 field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); 8811 // sentinel: anytype, 8812 field_values[2] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null"; 8813 8814 return sema.addConstant( 8815 type_info_ty, 8816 try Value.Tag.@"union".create(sema.arena, .{ 8817 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Array)), 8818 .val = try Value.Tag.@"struct".create(sema.arena, field_values), 8819 }), 8820 ); 8821 }, 8822 .Optional => { 8823 const field_values = try sema.arena.alloc(Value, 1); 8824 // child: type, 8825 field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena)); 8826 8827 return sema.addConstant( 8828 type_info_ty, 8829 try Value.Tag.@"union".create(sema.arena, .{ 8830 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Optional)), 8831 .val = try Value.Tag.@"struct".create(sema.arena, field_values), 8832 }), 8833 ); 8834 }, 8835 .ErrorUnion => { 8836 const field_values = try sema.arena.alloc(Value, 2); 8837 // error_set: type, 8838 field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet()); 8839 // payload: type, 8840 field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload()); 8841 8842 return sema.addConstant( 8843 type_info_ty, 8844 try Value.Tag.@"union".create(sema.arena, .{ 8845 .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorUnion)), 8846 .val = try Value.Tag.@"struct".create(sema.arena, field_values), 8847 }), 8848 ); 8849 }, 8850 else => |t| return sema.fail(block, src, "TODO: implement zirTypeInfo for {s}", .{ 8851 @tagName(t), 8852 }), 8853 } 8854} 8855 8856fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 8857 _ = block; 8858 const zir_datas = sema.code.instructions.items(.data); 8859 const inst_data = zir_datas[inst].un_node; 8860 const operand = sema.resolveInst(inst_data.operand); 8861 const operand_ty = sema.typeOf(operand); 8862 return sema.addType(operand_ty); 8863} 8864 8865fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 8866 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 8867 const src = 
inst_data.src(); 8868 const operand = sema.resolveInst(inst_data.operand); 8869 const operand_ty = sema.typeOf(operand); 8870 return sema.log2IntType(block, operand_ty, src); 8871} 8872 8873fn zirLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 8874 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 8875 const src = inst_data.src(); 8876 const operand = try sema.resolveType(block, src, inst_data.operand); 8877 return sema.log2IntType(block, operand, src); 8878} 8879 8880fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Air.Inst.Ref { 8881 switch (operand.zigTypeTag()) { 8882 .ComptimeInt => return Air.Inst.Ref.comptime_int_type, 8883 .Int => { 8884 var count: u16 = 0; 8885 var s = operand.bitSize(sema.mod.getTarget()) - 1; 8886 while (s != 0) : (s >>= 1) { 8887 count += 1; 8888 } 8889 const res = try Module.makeIntType(sema.arena, .unsigned, count); 8890 return sema.addType(res); 8891 }, 8892 else => return sema.fail( 8893 block, 8894 src, 8895 "bit shifting operation expected integer type, found '{}'", 8896 .{operand}, 8897 ), 8898 } 8899} 8900 8901fn zirTypeofPeer( 8902 sema: *Sema, 8903 block: *Block, 8904 extended: Zir.Inst.Extended.InstData, 8905) CompileError!Air.Inst.Ref { 8906 const tracy = trace(@src()); 8907 defer tracy.end(); 8908 8909 const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand); 8910 const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; 8911 const args = sema.code.refSlice(extra.end, extended.small); 8912 8913 const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len); 8914 defer sema.gpa.free(inst_list); 8915 8916 for (args) |arg_ref, i| { 8917 inst_list[i] = sema.resolveInst(arg_ref); 8918 } 8919 8920 const result_type = try sema.resolvePeerTypes(block, src, inst_list, .{ .typeof_builtin_call_node_offset = extra.data.src_node }); 8921 return sema.addType(result_type); 8922} 8923 8924fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 8925 const tracy = trace(@src()); 8926 defer tracy.end(); 8927 8928 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 8929 const src = inst_data.src(); 8930 const operand_src = src; // TODO put this on the operand, not the `!` 8931 const uncasted_operand = sema.resolveInst(inst_data.operand); 8932 8933 const bool_type = Type.initTag(.bool); 8934 const operand = try sema.coerce(block, bool_type, uncasted_operand, operand_src); 8935 if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { 8936 return if (val.isUndef()) 8937 sema.addConstUndef(bool_type) 8938 else if (val.toBool()) 8939 Air.Inst.Ref.bool_false 8940 else 8941 Air.Inst.Ref.bool_true; 8942 } 8943 try sema.requireRuntimeBlock(block, src); 8944 return block.addTyOp(.not, bool_type, operand); 8945} 8946 8947fn zirBoolBr( 8948 sema: *Sema, 8949 parent_block: *Block, 8950 inst: Zir.Inst.Index, 8951 is_bool_or: bool, 8952) CompileError!Air.Inst.Ref { 8953 const tracy = trace(@src()); 8954 defer tracy.end(); 8955 8956 const datas = sema.code.instructions.items(.data); 8957 const inst_data = datas[inst].bool_br; 8958 const lhs = sema.resolveInst(inst_data.lhs); 8959 const lhs_src = sema.src; 8960 const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); 8961 const body = sema.code.extra[extra.end..][0..extra.data.body_len]; 8962 const gpa = sema.gpa; 8963 8964 if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { 8965 if (lhs_val.toBool() == 
is_bool_or) { 8966 if (is_bool_or) { 8967 return Air.Inst.Ref.bool_true; 8968 } else { 8969 return Air.Inst.Ref.bool_false; 8970 } 8971 } 8972 // comptime-known left-hand side. No need for a block here; the result 8973 // is simply the rhs expression. Here we rely on there only being 1 8974 // break instruction (`break_inline`). 8975 return sema.resolveBody(parent_block, body); 8976 } 8977 8978 const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); 8979 try sema.air_instructions.append(gpa, .{ 8980 .tag = .block, 8981 .data = .{ .ty_pl = .{ 8982 .ty = .bool_type, 8983 .payload = undefined, 8984 } }, 8985 }); 8986 8987 var child_block = parent_block.makeSubBlock(); 8988 child_block.runtime_loop = null; 8989 child_block.runtime_cond = lhs_src; 8990 child_block.runtime_index += 1; 8991 defer child_block.instructions.deinit(gpa); 8992 8993 var then_block = child_block.makeSubBlock(); 8994 defer then_block.instructions.deinit(gpa); 8995 8996 var else_block = child_block.makeSubBlock(); 8997 defer else_block.instructions.deinit(gpa); 8998 8999 const lhs_block = if (is_bool_or) &then_block else &else_block; 9000 const rhs_block = if (is_bool_or) &else_block else &then_block; 9001 9002 const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false; 9003 _ = try lhs_block.addBr(block_inst, lhs_result); 9004 9005 const rhs_result = try sema.resolveBody(rhs_block, body); 9006 _ = try rhs_block.addBr(block_inst, rhs_result); 9007 9008 try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + 9009 then_block.instructions.items.len + else_block.instructions.items.len + 9010 @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1); 9011 9012 const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ 9013 .then_body_len = @intCast(u32, then_block.instructions.items.len), 9014 .else_body_len = @intCast(u32, else_block.instructions.items.len), 9015 }); 9016 sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); 9017 sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items); 9018 9019 _ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{ 9020 .operand = lhs, 9021 .payload = cond_br_payload, 9022 } } }); 9023 9024 sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( 9025 Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, 9026 ); 9027 sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); 9028 9029 try parent_block.instructions.append(gpa, block_inst); 9030 return Air.indexToRef(block_inst); 9031} 9032 9033fn zirIsNonNull( 9034 sema: *Sema, 9035 block: *Block, 9036 inst: Zir.Inst.Index, 9037) CompileError!Air.Inst.Ref { 9038 const tracy = trace(@src()); 9039 defer tracy.end(); 9040 9041 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9042 const src = inst_data.src(); 9043 const operand = sema.resolveInst(inst_data.operand); 9044 return sema.analyzeIsNull(block, src, operand, true); 9045} 9046 9047fn zirIsNonNullPtr( 9048 sema: *Sema, 9049 block: *Block, 9050 inst: Zir.Inst.Index, 9051) CompileError!Air.Inst.Ref { 9052 const tracy = trace(@src()); 9053 defer tracy.end(); 9054 9055 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9056 const src = inst_data.src(); 9057 const ptr = sema.resolveInst(inst_data.operand); 9058 if ((try sema.resolveMaybeUndefVal(block, src, ptr)) == null) { 9059 return block.addUnOp(.is_non_null_ptr, ptr); 9060 } 9061 const 
loaded = try sema.analyzeLoad(block, src, ptr, src); 9062 return sema.analyzeIsNull(block, src, loaded, true); 9063} 9064 9065fn zirIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9066 const tracy = trace(@src()); 9067 defer tracy.end(); 9068 9069 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9070 const operand = sema.resolveInst(inst_data.operand); 9071 return sema.analyzeIsNonErr(block, inst_data.src(), operand); 9072} 9073 9074fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9075 const tracy = trace(@src()); 9076 defer tracy.end(); 9077 9078 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9079 const src = inst_data.src(); 9080 const ptr = sema.resolveInst(inst_data.operand); 9081 const loaded = try sema.analyzeLoad(block, src, ptr, src); 9082 return sema.analyzeIsNonErr(block, src, loaded); 9083} 9084 9085fn zirCondbr( 9086 sema: *Sema, 9087 parent_block: *Block, 9088 inst: Zir.Inst.Index, 9089) CompileError!Zir.Inst.Index { 9090 const tracy = trace(@src()); 9091 defer tracy.end(); 9092 9093 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9094 const src = inst_data.src(); 9095 const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; 9096 const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); 9097 9098 const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; 9099 const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; 9100 9101 const uncasted_cond = sema.resolveInst(extra.data.condition); 9102 const cond = try sema.coerce(parent_block, Type.initTag(.bool), uncasted_cond, cond_src); 9103 9104 if (try sema.resolveDefinedValue(parent_block, src, cond)) |cond_val| { 9105 const body = if (cond_val.toBool()) then_body else else_body; 9106 _ = try sema.analyzeBody(parent_block, body); 9107 return always_noreturn; 9108 } 9109 9110 const gpa = sema.gpa; 9111 9112 // We'll re-use the sub block to save on memory bandwidth, and yank out the 9113 // instructions array in between using it for the then block and else block. 
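    // Added clarifying note: `analyzeBody` on the then body fills
    // `sub_block.instructions`, `toOwnedSlice` takes that buffer as the then
    // branch, and the emptied sub block is reused for the else body; both
    // instruction lists are then appended to `air_extra` right after the
    // `Air.CondBr` payload.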
9114 var sub_block = parent_block.makeSubBlock(); 9115 sub_block.runtime_loop = null; 9116 sub_block.runtime_cond = cond_src; 9117 sub_block.runtime_index += 1; 9118 defer sub_block.instructions.deinit(gpa); 9119 9120 _ = try sema.analyzeBody(&sub_block, then_body); 9121 const true_instructions = sub_block.instructions.toOwnedSlice(gpa); 9122 defer gpa.free(true_instructions); 9123 9124 _ = try sema.analyzeBody(&sub_block, else_body); 9125 try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + 9126 true_instructions.len + sub_block.instructions.items.len); 9127 _ = try parent_block.addInst(.{ 9128 .tag = .cond_br, 9129 .data = .{ .pl_op = .{ 9130 .operand = cond, 9131 .payload = sema.addExtraAssumeCapacity(Air.CondBr{ 9132 .then_body_len = @intCast(u32, true_instructions.len), 9133 .else_body_len = @intCast(u32, sub_block.instructions.items.len), 9134 }), 9135 } }, 9136 }); 9137 sema.air_extra.appendSliceAssumeCapacity(true_instructions); 9138 sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items); 9139 return always_noreturn; 9140} 9141 9142fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { 9143 const tracy = trace(@src()); 9144 defer tracy.end(); 9145 9146 const inst_data = sema.code.instructions.items(.data)[inst].@"unreachable"; 9147 const src = inst_data.src(); 9148 try sema.requireRuntimeBlock(block, src); 9149 // TODO Add compile error for @optimizeFor occurring too late in a scope. 9150 try block.addUnreachable(src, inst_data.safety); 9151 return always_noreturn; 9152} 9153 9154fn zirRetErrValue( 9155 sema: *Sema, 9156 block: *Block, 9157 inst: Zir.Inst.Index, 9158) CompileError!Zir.Inst.Index { 9159 const inst_data = sema.code.instructions.items(.data)[inst].str_tok; 9160 const err_name = inst_data.get(sema.code); 9161 const src = inst_data.src(); 9162 9163 // Return the error code from the function. 
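    // Added illustrative note: e.g. `return error.InvalidInput;` reaches here.
    // The error name is interned, a constant of the single-item error set type
    // `error{InvalidInput}` carrying that error value is created, and
    // `analyzeRet` below coerces it into the function's (possibly inferred)
    // error set.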
9164 const kv = try sema.mod.getErrorValue(err_name); 9165 const result_inst = try sema.addConstant( 9166 try Type.Tag.error_set_single.create(sema.arena, kv.key), 9167 try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), 9168 ); 9169 return sema.analyzeRet(block, result_inst, src); 9170} 9171 9172fn zirRetCoerce( 9173 sema: *Sema, 9174 block: *Block, 9175 inst: Zir.Inst.Index, 9176) CompileError!Zir.Inst.Index { 9177 const tracy = trace(@src()); 9178 defer tracy.end(); 9179 9180 const inst_data = sema.code.instructions.items(.data)[inst].un_tok; 9181 const operand = sema.resolveInst(inst_data.operand); 9182 const src = inst_data.src(); 9183 9184 return sema.analyzeRet(block, operand, src); 9185} 9186 9187fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { 9188 const tracy = trace(@src()); 9189 defer tracy.end(); 9190 9191 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9192 const operand = sema.resolveInst(inst_data.operand); 9193 const src = inst_data.src(); 9194 9195 return sema.analyzeRet(block, operand, src); 9196} 9197 9198fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { 9199 const tracy = trace(@src()); 9200 defer tracy.end(); 9201 9202 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9203 const src = inst_data.src(); 9204 const ret_ptr = sema.resolveInst(inst_data.operand); 9205 9206 if (block.is_comptime or block.inlining != null) { 9207 const operand = try sema.analyzeLoad(block, src, ret_ptr, src); 9208 return sema.analyzeRet(block, operand, src); 9209 } 9210 try sema.requireRuntimeBlock(block, src); 9211 _ = try block.addUnOp(.ret_load, ret_ptr); 9212 return always_noreturn; 9213} 9214 9215fn analyzeRet( 9216 sema: *Sema, 9217 block: *Block, 9218 uncasted_operand: Air.Inst.Ref, 9219 src: LazySrcLoc, 9220) CompileError!Zir.Inst.Index { 9221 // Special case for returning an error to an inferred error set; we need to 9222 // add the error tag to the inferred error set of the in-scope function, so 9223 // that the coercion below works correctly. 9224 if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) { 9225 if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { 9226 const op_ty = sema.typeOf(uncasted_operand); 9227 switch (op_ty.zigTypeTag()) { 9228 .ErrorSet => { 9229 try payload.data.addErrorSet(sema.gpa, op_ty); 9230 }, 9231 .ErrorUnion => { 9232 try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet()); 9233 }, 9234 else => {}, 9235 } 9236 } 9237 } 9238 const operand = try sema.coerce(block, sema.fn_ret_ty, uncasted_operand, src); 9239 9240 if (block.inlining) |inlining| { 9241 if (block.is_comptime) { 9242 inlining.comptime_result = operand; 9243 return error.ComptimeReturn; 9244 } 9245 // We are inlining a function call; rewrite the `ret` as a `break`. 
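        // Added illustrative note: e.g. for an inlined or comptime call
        // `const x = foo();`, a `return y` inside `foo` does not emit a `ret`:
        // in the comptime case the value is stashed in
        // `inlining.comptime_result` and unwound via `error.ComptimeReturn`
        // above; otherwise it is appended to the merge results and lowered to
        // a `br` targeting the call-site block.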
        try inlining.merges.results.append(sema.gpa, operand);
        _ = try block.addBr(inlining.merges.block_inst, operand);
        return always_noreturn;
    }

    try sema.resolveTypeLayout(block, src, sema.fn_ret_ty);
    _ = try block.addUnOp(.ret, operand);
    return always_noreturn;
}

fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
    // extend this switch as additional operators are implemented
    return switch (tag) {
        .add, .sub, .mul, .div, .div_exact, .div_trunc, .div_floor, .mod, .rem, .mod_rem => true,
        else => false,
    };
}

fn zirPtrTypeSimple(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].ptr_type_simple;
    const elem_type = try sema.resolveType(block, .unneeded, inst_data.elem_type);
    const ty = try Type.ptr(sema.arena, .{
        .pointee_type = elem_type,
        .@"addrspace" = .generic,
        .mutable = inst_data.is_mutable,
        .@"allowzero" = inst_data.is_allowzero or inst_data.size == .C,
        .@"volatile" = inst_data.is_volatile,
        .size = inst_data.size,
    });
    return sema.addType(ty);
}

fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const src: LazySrcLoc = .unneeded;
    const inst_data = sema.code.instructions.items(.data)[inst].ptr_type;
    const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);

    var extra_i = extra.end;

    const sentinel = if (inst_data.flags.has_sentinel) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk (try sema.resolveInstConst(block, .unneeded, ref)).val;
    } else null;

    const abi_align = if (inst_data.flags.has_align) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u32);
    } else 0;

    const address_space = if (inst_data.flags.has_addrspace) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk try sema.analyzeAddrspace(block, .unneeded, ref, .pointer);
    } else .generic;

    const bit_start = if (inst_data.flags.has_bit_range) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16);
    } else 0;

    const bit_end = if (inst_data.flags.has_bit_range) blk: {
        const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
        extra_i += 1;
        break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16);
    } else 0;

    if (bit_end != 0 and bit_start >= bit_end * 8)
        return sema.fail(block, src, "bit offset starts after end of host integer", .{});

    const elem_type = try sema.resolveType(block, .unneeded, extra.data.elem_type);

    const ty = try Type.ptr(sema.arena, .{
        .pointee_type = elem_type,
        .sentinel = sentinel,
        .@"align" = abi_align,
        .@"addrspace" = address_space,
        .bit_offset = bit_start,
        .host_size = bit_end,
        .mutable = inst_data.flags.is_mutable,
        .@"allowzero" = inst_data.flags.is_allowzero or inst_data.size == .C,
        .@"volatile" = inst_data.flags.is_volatile,
        .size =
inst_data.size, 9337 }); 9338 return sema.addType(ty); 9339} 9340 9341fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9342 const tracy = trace(@src()); 9343 defer tracy.end(); 9344 9345 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9346 const src = inst_data.src(); 9347 const obj_ty = try sema.resolveType(block, src, inst_data.operand); 9348 9349 switch (obj_ty.zigTypeTag()) { 9350 .Struct => return sema.addConstant(obj_ty, Value.initTag(.empty_struct_value)), 9351 .Array => { 9352 if (obj_ty.sentinel()) |sentinel| { 9353 const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel); 9354 return sema.addConstant(obj_ty, val); 9355 } else { 9356 return sema.addConstant(obj_ty, Value.initTag(.empty_array)); 9357 } 9358 }, 9359 .Void => return sema.addConstant(obj_ty, Value.void), 9360 else => unreachable, 9361 } 9362} 9363 9364fn zirUnionInitPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9365 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9366 const src = inst_data.src(); 9367 return sema.fail(block, src, "TODO: Sema.zirUnionInitPtr", .{}); 9368} 9369 9370fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { 9371 const gpa = sema.gpa; 9372 const zir_datas = sema.code.instructions.items(.data); 9373 const inst_data = zir_datas[inst].pl_node; 9374 const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); 9375 const src = inst_data.src(); 9376 9377 const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; 9378 const first_field_type_data = zir_datas[first_item.field_type].pl_node; 9379 const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data; 9380 const unresolved_struct_type = try sema.resolveType(block, src, first_field_type_extra.container_type); 9381 const resolved_ty = try sema.resolveTypeFields(block, src, unresolved_struct_type); 9382 9383 if (resolved_ty.castTag(.@"struct")) |struct_payload| { 9384 const struct_obj = struct_payload.data; 9385 9386 // Maps field index to field_type index of where it was already initialized. 9387 // For making sure all fields are accounted for and no fields are duplicated. 9388 const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count()); 9389 defer gpa.free(found_fields); 9390 mem.set(Zir.Inst.Index, found_fields, 0); 9391 9392 // The init values to use for the struct instance. 
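        // Added illustrative note: e.g. `Point{ .x = 1, .y = 2 }` lands here
        // when `Point` is a struct: each provided init is recorded below,
        // omitted fields fall back to their default values (or produce a
        // "missing struct field" error), and if every init is comptime-known
        // the whole initializer folds to a single struct constant.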
9393 const field_inits = try gpa.alloc(Air.Inst.Ref, struct_obj.fields.count()); 9394 defer gpa.free(field_inits); 9395 9396 var field_i: u32 = 0; 9397 var extra_index = extra.end; 9398 9399 while (field_i < extra.data.fields_len) : (field_i += 1) { 9400 const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index); 9401 extra_index = item.end; 9402 9403 const field_type_data = zir_datas[item.data.field_type].pl_node; 9404 const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_type_data.src_node }; 9405 const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; 9406 const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); 9407 const field_index = struct_obj.fields.getIndex(field_name) orelse 9408 return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); 9409 if (found_fields[field_index] != 0) { 9410 const other_field_type = found_fields[field_index]; 9411 const other_field_type_data = zir_datas[other_field_type].pl_node; 9412 const other_field_src: LazySrcLoc = .{ .node_offset_back2tok = other_field_type_data.src_node }; 9413 const msg = msg: { 9414 const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); 9415 errdefer msg.destroy(gpa); 9416 try sema.errNote(block, other_field_src, msg, "other field here", .{}); 9417 break :msg msg; 9418 }; 9419 return sema.failWithOwnedErrorMsg(msg); 9420 } 9421 found_fields[field_index] = item.data.field_type; 9422 field_inits[field_index] = sema.resolveInst(item.data.init); 9423 } 9424 9425 var root_msg: ?*Module.ErrorMsg = null; 9426 9427 for (found_fields) |field_type_inst, i| { 9428 if (field_type_inst != 0) continue; 9429 9430 // Check if the field has a default init. 9431 const field = struct_obj.fields.values()[i]; 9432 if (field.default_val.tag() == .unreachable_value) { 9433 const field_name = struct_obj.fields.keys()[i]; 9434 const template = "missing struct field: {s}"; 9435 const args = .{field_name}; 9436 if (root_msg) |msg| { 9437 try sema.errNote(block, src, msg, template, args); 9438 } else { 9439 root_msg = try sema.errMsg(block, src, template, args); 9440 } 9441 } else { 9442 field_inits[i] = try sema.addConstant(field.ty, field.default_val); 9443 } 9444 } 9445 if (root_msg) |msg| { 9446 const fqn = try struct_obj.getFullyQualifiedName(gpa); 9447 defer gpa.free(fqn); 9448 try sema.mod.errNoteNonLazy( 9449 struct_obj.srcLoc(), 9450 msg, 9451 "struct '{s}' declared here", 9452 .{fqn}, 9453 ); 9454 return sema.failWithOwnedErrorMsg(msg); 9455 } 9456 9457 if (is_ref) { 9458 return sema.fail(block, src, "TODO: Sema.zirStructInit is_ref=true", .{}); 9459 } 9460 9461 const is_comptime = for (field_inits) |field_init| { 9462 if (!(try sema.isComptimeKnown(block, src, field_init))) { 9463 break false; 9464 } 9465 } else true; 9466 9467 if (is_comptime) { 9468 const values = try sema.arena.alloc(Value, field_inits.len); 9469 for (field_inits) |field_init, i| { 9470 values[i] = (sema.resolveMaybeUndefVal(block, src, field_init) catch unreachable).?; 9471 } 9472 return sema.addConstant(resolved_ty, try Value.Tag.@"struct".create(sema.arena, values)); 9473 } 9474 9475 return sema.fail(block, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); 9476 } else if (resolved_ty.cast(Type.Payload.Union)) |union_payload| { 9477 const union_obj = union_payload.data; 9478 9479 if (extra.data.fields_len != 1) { 9480 return sema.fail(block, src, "union initialization expects exactly one field", .{}); 9481 } 9482 9483 
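        // The single initializer Item (count checked above) is decoded the same way
        // as in the struct case: its field_type instruction tells us which union
        // field is being initialized.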
const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end); 9484 9485 const field_type_data = zir_datas[item.data.field_type].pl_node; 9486 const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_type_data.src_node }; 9487 const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; 9488 const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); 9489 const field_index_usize = union_obj.fields.getIndex(field_name) orelse 9490 return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); 9491 const field_index = @intCast(u32, field_index_usize); 9492 9493 if (is_ref) { 9494 return sema.fail(block, src, "TODO: Sema.zirStructInit is_ref=true union", .{}); 9495 } 9496 9497 const init_inst = sema.resolveInst(item.data.init); 9498 if (try sema.resolveMaybeUndefVal(block, field_src, init_inst)) |val| { 9499 const tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index); 9500 return sema.addConstant( 9501 resolved_ty, 9502 try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }), 9503 ); 9504 } 9505 return sema.fail(block, src, "TODO: Sema.zirStructInit for runtime-known union values", .{}); 9506 } 9507 unreachable; 9508} 9509 9510fn zirStructInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { 9511 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9512 const src = inst_data.src(); 9513 9514 _ = is_ref; 9515 return sema.fail(block, src, "TODO: Sema.zirStructInitAnon", .{}); 9516} 9517 9518fn zirArrayInit( 9519 sema: *Sema, 9520 block: *Block, 9521 inst: Zir.Inst.Index, 9522 is_ref: bool, 9523) CompileError!Air.Inst.Ref { 9524 const gpa = sema.gpa; 9525 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9526 const src = inst_data.src(); 9527 9528 const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); 9529 const args = sema.code.refSlice(extra.end, extra.data.operands_len); 9530 assert(args.len != 0); 9531 9532 const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len); 9533 defer gpa.free(resolved_args); 9534 9535 for (args) |arg, i| resolved_args[i] = sema.resolveInst(arg); 9536 9537 const elem_ty = sema.typeOf(resolved_args[0]); 9538 9539 const array_ty = try Type.Tag.array.create(sema.arena, .{ 9540 .len = resolved_args.len, 9541 .elem_type = elem_ty, 9542 }); 9543 9544 const opt_runtime_src: ?LazySrcLoc = for (resolved_args) |arg| { 9545 const arg_src = src; // TODO better source location 9546 const comptime_known = try sema.isComptimeKnown(block, arg_src, arg); 9547 if (!comptime_known) break arg_src; 9548 } else null; 9549 9550 const runtime_src = opt_runtime_src orelse { 9551 var anon_decl = try block.startAnonDecl(); 9552 defer anon_decl.deinit(); 9553 9554 const elem_vals = try anon_decl.arena().alloc(Value, resolved_args.len); 9555 for (resolved_args) |arg, i| { 9556 // We checked that all args are comptime above. 
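            // so resolving each element value cannot fail here (hence `catch unreachable`).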
9557 const arg_val = (sema.resolveMaybeUndefVal(block, src, arg) catch unreachable).?; 9558 elem_vals[i] = try arg_val.copy(anon_decl.arena()); 9559 } 9560 9561 const val = try Value.Tag.array.create(anon_decl.arena(), elem_vals); 9562 const decl = try anon_decl.finish(try array_ty.copy(anon_decl.arena()), val); 9563 if (is_ref) { 9564 return sema.analyzeDeclRef(decl); 9565 } else { 9566 return sema.analyzeDeclVal(block, .unneeded, decl); 9567 } 9568 }; 9569 9570 try sema.requireRuntimeBlock(block, runtime_src); 9571 try sema.resolveTypeLayout(block, src, elem_ty); 9572 9573 const alloc_ty = try Type.ptr(sema.arena, .{ 9574 .pointee_type = array_ty, 9575 .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), 9576 }); 9577 const alloc = try block.addTy(.alloc, alloc_ty); 9578 9579 for (resolved_args) |arg, i| { 9580 const index = try sema.addIntUnsigned(Type.initTag(.u64), i); 9581 const elem_ptr = try block.addBinOp(.ptr_elem_ptr, alloc, index); 9582 _ = try block.addBinOp(.store, elem_ptr, arg); 9583 } 9584 if (is_ref) { 9585 return alloc; 9586 } else { 9587 return sema.analyzeLoad(block, .unneeded, alloc, .unneeded); 9588 } 9589} 9590 9591fn zirArrayInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { 9592 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9593 const src = inst_data.src(); 9594 9595 _ = is_ref; 9596 return sema.fail(block, src, "TODO: Sema.zirArrayInitAnon", .{}); 9597} 9598 9599fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9600 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9601 const src = inst_data.src(); 9602 return sema.fail(block, src, "TODO: Sema.zirFieldTypeRef", .{}); 9603} 9604 9605fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9606 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9607 const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; 9608 const src = inst_data.src(); 9609 const field_name = sema.code.nullTerminatedString(extra.name_start); 9610 const unresolved_ty = try sema.resolveType(block, src, extra.container_type); 9611 const resolved_ty = try sema.resolveTypeFields(block, src, unresolved_ty); 9612 switch (resolved_ty.zigTypeTag()) { 9613 .Struct => { 9614 const struct_obj = resolved_ty.castTag(.@"struct").?.data; 9615 const field = struct_obj.fields.get(field_name) orelse 9616 return sema.failWithBadStructFieldAccess(block, struct_obj, src, field_name); 9617 return sema.addType(field.ty); 9618 }, 9619 .Union => { 9620 const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; 9621 const field = union_obj.fields.get(field_name) orelse 9622 return sema.failWithBadUnionFieldAccess(block, union_obj, src, field_name); 9623 return sema.addType(field.ty); 9624 }, 9625 else => return sema.fail(block, src, "expected struct or union; found '{}'", .{ 9626 resolved_ty, 9627 }), 9628 } 9629} 9630 9631fn zirErrorReturnTrace( 9632 sema: *Sema, 9633 block: *Block, 9634 extended: Zir.Inst.Extended.InstData, 9635) CompileError!Air.Inst.Ref { 9636 const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; 9637 return sema.fail(block, src, "TODO: Sema.zirErrorReturnTrace", .{}); 9638} 9639 9640fn zirFrame( 9641 sema: *Sema, 9642 block: *Block, 9643 extended: Zir.Inst.Extended.InstData, 9644) CompileError!Air.Inst.Ref { 9645 const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, 
extended.operand) }; 9646 return sema.fail(block, src, "TODO: Sema.zirFrame", .{}); 9647} 9648 9649fn zirFrameAddress( 9650 sema: *Sema, 9651 block: *Block, 9652 extended: Zir.Inst.Extended.InstData, 9653) CompileError!Air.Inst.Ref { 9654 const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; 9655 return sema.fail(block, src, "TODO: Sema.zirFrameAddress", .{}); 9656} 9657 9658fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9659 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9660 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9661 const ty = try sema.resolveType(block, operand_src, inst_data.operand); 9662 const resolved_ty = try sema.resolveTypeFields(block, operand_src, ty); 9663 try sema.resolveTypeLayout(block, operand_src, resolved_ty); 9664 const target = sema.mod.getTarget(); 9665 const abi_align = resolved_ty.abiAlignment(target); 9666 return sema.addIntUnsigned(Type.comptime_int, abi_align); 9667} 9668 9669fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9670 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9671 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9672 const operand = sema.resolveInst(inst_data.operand); 9673 if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { 9674 if (val.isUndef()) return sema.addConstUndef(Type.initTag(.u1)); 9675 const bool_ints = [2]Air.Inst.Ref{ .zero, .one }; 9676 return bool_ints[@boolToInt(val.toBool())]; 9677 } 9678 return block.addUnOp(.bool_to_int, operand); 9679} 9680 9681fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9682 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9683 const src = inst_data.src(); 9684 return sema.fail(block, src, "TODO: Sema.zirErrorName", .{}); 9685} 9686 9687fn zirUnaryMath(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9688 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9689 const src = inst_data.src(); 9690 return sema.fail(block, src, "TODO: Sema.zirUnaryMath", .{}); 9691} 9692 9693fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9694 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9695 const src = inst_data.src(); 9696 return sema.fail(block, src, "TODO: Sema.zirTagName", .{}); 9697} 9698 9699fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9700 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9701 const src = inst_data.src(); 9702 const type_info_ty = try sema.resolveBuiltinTypeFields(block, src, "TypeInfo"); 9703 const uncasted_operand = sema.resolveInst(inst_data.operand); 9704 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9705 const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); 9706 const val = try sema.resolveConstValue(block, operand_src, type_info); 9707 const union_val = val.cast(Value.Payload.Union).?.data; 9708 const tag_ty = type_info_ty.unionTagType().?; 9709 const tag_index = tag_ty.enumTagFieldIndex(union_val.tag).?; 9710 switch (@intToEnum(std.builtin.TypeId, tag_index)) { 9711 .Type => return Air.Inst.Ref.type_type, 9712 .Void => return Air.Inst.Ref.void_type, 9713 .Bool => return Air.Inst.Ref.bool_type, 9714 .NoReturn => 
return Air.Inst.Ref.noreturn_type, 9715 .ComptimeFloat => return Air.Inst.Ref.comptime_float_type, 9716 .ComptimeInt => return Air.Inst.Ref.comptime_int_type, 9717 .Undefined => return Air.Inst.Ref.undefined_type, 9718 .Null => return Air.Inst.Ref.null_type, 9719 .AnyFrame => return Air.Inst.Ref.anyframe_type, 9720 .EnumLiteral => return Air.Inst.Ref.enum_literal_type, 9721 .Int => { 9722 const struct_val = union_val.val.castTag(.@"struct").?.data; 9723 // TODO use reflection instead of magic numbers here 9724 const signedness_val = struct_val[0]; 9725 const bits_val = struct_val[1]; 9726 9727 const signedness = signedness_val.toEnum(std.builtin.Signedness); 9728 const bits = @intCast(u16, bits_val.toUnsignedInt()); 9729 const ty = switch (signedness) { 9730 .signed => try Type.Tag.int_signed.create(sema.arena, bits), 9731 .unsigned => try Type.Tag.int_unsigned.create(sema.arena, bits), 9732 }; 9733 return sema.addType(ty); 9734 }, 9735 .Vector => { 9736 const struct_val = union_val.val.castTag(.@"struct").?.data; 9737 // TODO use reflection instead of magic numbers here 9738 const len_val = struct_val[0]; 9739 const child_val = struct_val[1]; 9740 9741 const len = len_val.toUnsignedInt(); 9742 var buffer: Value.ToTypeBuffer = undefined; 9743 const child_ty = child_val.toType(&buffer); 9744 9745 const ty = try Type.vector(sema.arena, len, child_ty); 9746 return sema.addType(ty); 9747 }, 9748 .Float => return sema.fail(block, src, "TODO: Sema.zirReify for Float", .{}), 9749 .Pointer => return sema.fail(block, src, "TODO: Sema.zirReify for Pointer", .{}), 9750 .Array => return sema.fail(block, src, "TODO: Sema.zirReify for Array", .{}), 9751 .Struct => return sema.fail(block, src, "TODO: Sema.zirReify for Struct", .{}), 9752 .Optional => return sema.fail(block, src, "TODO: Sema.zirReify for Optional", .{}), 9753 .ErrorUnion => return sema.fail(block, src, "TODO: Sema.zirReify for ErrorUnion", .{}), 9754 .ErrorSet => return sema.fail(block, src, "TODO: Sema.zirReify for ErrorSet", .{}), 9755 .Enum => return sema.fail(block, src, "TODO: Sema.zirReify for Enum", .{}), 9756 .Union => return sema.fail(block, src, "TODO: Sema.zirReify for Union", .{}), 9757 .Fn => return sema.fail(block, src, "TODO: Sema.zirReify for Fn", .{}), 9758 .BoundFn => @panic("TODO delete BoundFn from the language"), 9759 .Opaque => return sema.fail(block, src, "TODO: Sema.zirReify for Opaque", .{}), 9760 .Frame => return sema.fail(block, src, "TODO: Sema.zirReify for Frame", .{}), 9761 } 9762} 9763 9764fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9765 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9766 const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9767 const ty = try sema.resolveType(block, ty_src, inst_data.operand); 9768 9769 var anon_decl = try block.startAnonDecl(); 9770 defer anon_decl.deinit(); 9771 9772 const bytes = try ty.nameAlloc(anon_decl.arena()); 9773 9774 const new_decl = try anon_decl.finish( 9775 try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), 9776 try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 9777 ); 9778 9779 return sema.analyzeDeclRef(new_decl); 9780} 9781 9782fn zirFrameType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9783 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9784 const src = inst_data.src(); 9785 return sema.fail(block, src, "TODO: Sema.zirFrameType", .{}); 9786} 9787 9788fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9789 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 9790 const src = inst_data.src(); 9791 return sema.fail(block, src, "TODO: Sema.zirFrameSize", .{}); 9792} 9793 9794fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9795 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9796 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 9797 const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9798 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 9799 const dest_ty = try sema.resolveType(block, ty_src, extra.lhs); 9800 const operand = sema.resolveInst(extra.rhs); 9801 const operand_ty = sema.typeOf(operand); 9802 9803 _ = try sema.checkIntType(block, ty_src, dest_ty); 9804 try sema.checkFloatType(block, operand_src, operand_ty); 9805 9806 if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { 9807 const target = sema.mod.getTarget(); 9808 const result_val = val.floatToInt(sema.arena, dest_ty, target) catch |err| switch (err) { 9809 error.FloatCannotFit => { 9810 return sema.fail(block, operand_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty }); 9811 }, 9812 else => |e| return e, 9813 }; 9814 return sema.addConstant(dest_ty, result_val); 9815 } 9816 9817 try sema.requireRuntimeBlock(block, operand_src); 9818 return block.addTyOp(.float_to_int, dest_ty, operand); 9819} 9820 9821fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9822 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9823 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 9824 const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9825 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 9826 const dest_ty = try sema.resolveType(block, ty_src, extra.lhs); 9827 const operand = sema.resolveInst(extra.rhs); 9828 const operand_ty = sema.typeOf(operand); 9829 9830 try sema.checkFloatType(block, ty_src, dest_ty); 9831 _ = try sema.checkIntType(block, operand_src, operand_ty); 9832 9833 if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { 9834 const target = sema.mod.getTarget(); 9835 const result_val = try val.intToFloat(sema.arena, dest_ty, target); 9836 return sema.addConstant(dest_ty, result_val); 9837 } 9838 9839 try sema.requireRuntimeBlock(block, operand_src); 9840 return block.addTyOp(.int_to_float, dest_ty, operand); 9841} 9842 9843fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9844 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9845 const src = inst_data.src(); 9846 9847 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 9848 9849 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 9850 const operand_res = 
sema.resolveInst(extra.rhs); 9851 const operand_coerced = try sema.coerce(block, Type.usize, operand_res, operand_src); 9852 9853 const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9854 const type_res = try sema.resolveType(block, src, extra.lhs); 9855 if (type_res.zigTypeTag() != .Pointer) 9856 return sema.fail(block, type_src, "expected pointer, found '{}'", .{type_res}); 9857 const ptr_align = type_res.ptrAlignment(sema.mod.getTarget()); 9858 9859 if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| { 9860 const addr = val.toUnsignedInt(); 9861 if (!type_res.isAllowzeroPtr() and addr == 0) 9862 return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{type_res}); 9863 if (addr != 0 and addr % ptr_align != 0) 9864 return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{type_res}); 9865 9866 const val_payload = try sema.arena.create(Value.Payload.U64); 9867 val_payload.* = .{ 9868 .base = .{ .tag = .int_u64 }, 9869 .data = addr, 9870 }; 9871 return sema.addConstant(type_res, Value.initPayload(&val_payload.base)); 9872 } 9873 9874 try sema.requireRuntimeBlock(block, src); 9875 if (block.wantSafety()) { 9876 if (!type_res.isAllowzeroPtr()) { 9877 const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); 9878 try sema.addSafetyCheck(block, is_non_zero, .cast_to_null); 9879 } 9880 9881 if (ptr_align > 1) { 9882 const val_payload = try sema.arena.create(Value.Payload.U64); 9883 val_payload.* = .{ 9884 .base = .{ .tag = .int_u64 }, 9885 .data = ptr_align - 1, 9886 }; 9887 const align_minus_1 = try sema.addConstant( 9888 Type.usize, 9889 Value.initPayload(&val_payload.base), 9890 ); 9891 const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); 9892 const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); 9893 try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment); 9894 } 9895 } 9896 return block.addBitCast(type_res, operand_coerced); 9897} 9898 9899fn zirErrSetCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9900 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9901 const src = inst_data.src(); 9902 return sema.fail(block, src, "TODO: Sema.zirErrSetCast", .{}); 9903} 9904 9905fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9906 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9907 const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9908 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 9909 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 9910 const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); 9911 const operand = sema.resolveInst(extra.rhs); 9912 const operand_ty = sema.typeOf(operand); 9913 if (operand_ty.zigTypeTag() != .Pointer) { 9914 return sema.fail(block, operand_src, "expected pointer, found {s} type '{}'", .{ 9915 @tagName(operand_ty.zigTypeTag()), operand_ty, 9916 }); 9917 } 9918 if (dest_ty.zigTypeTag() != .Pointer) { 9919 return sema.fail(block, dest_ty_src, "expected pointer, found {s} type '{}'", .{ 9920 @tagName(dest_ty.zigTypeTag()), dest_ty, 9921 }); 9922 } 9923 return sema.coerceCompatiblePtrs(block, dest_ty, operand, operand_src); 9924} 9925 9926fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9927 const 
inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9928 const src = inst_data.src(); 9929 const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9930 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 9931 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 9932 const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); 9933 const operand = sema.resolveInst(extra.rhs); 9934 const operand_ty = sema.typeOf(operand); 9935 const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty); 9936 const src_is_comptime_int = try sema.checkIntType(block, operand_src, operand_ty); 9937 9938 if (dest_is_comptime_int) { 9939 return sema.coerce(block, dest_ty, operand, operand_src); 9940 } 9941 9942 const target = sema.mod.getTarget(); 9943 const dest_info = dest_ty.intInfo(target); 9944 9945 if (dest_info.bits == 0) { 9946 return sema.addConstant(dest_ty, Value.zero); 9947 } 9948 9949 if (!src_is_comptime_int) { 9950 const src_info = operand_ty.intInfo(target); 9951 if (src_info.bits == 0) { 9952 return sema.addConstant(dest_ty, Value.zero); 9953 } 9954 9955 if (src_info.signedness != dest_info.signedness) { 9956 return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{ 9957 @tagName(dest_info.signedness), operand_ty, 9958 }); 9959 } 9960 if (src_info.bits > 0 and src_info.bits < dest_info.bits) { 9961 const msg = msg: { 9962 const msg = try sema.errMsg( 9963 block, 9964 src, 9965 "destination type '{}' has more bits than source type '{}'", 9966 .{ dest_ty, operand_ty }, 9967 ); 9968 errdefer msg.destroy(sema.gpa); 9969 try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{ 9970 dest_info.bits, 9971 }); 9972 try sema.errNote(block, operand_src, msg, "source type has {d} bits", .{ 9973 src_info.bits, 9974 }); 9975 break :msg msg; 9976 }; 9977 return sema.failWithOwnedErrorMsg(msg); 9978 } 9979 } 9980 9981 if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { 9982 if (val.isUndef()) return sema.addConstUndef(dest_ty); 9983 return sema.addConstant(dest_ty, try val.intTrunc(sema.arena, dest_info.signedness, dest_info.bits)); 9984 } 9985 9986 try sema.requireRuntimeBlock(block, src); 9987 return block.addTyOp(.trunc, dest_ty, operand); 9988} 9989 9990fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 9991 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 9992 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 9993 const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 9994 const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 9995 const dest_align = try sema.resolveAlign(block, align_src, extra.lhs); 9996 const ptr = sema.resolveInst(extra.rhs); 9997 const ptr_ty = sema.typeOf(ptr); 9998 9999 // TODO in addition to pointers, this instruction is supposed to work for 10000 // pointer-like optionals and slices. 10001 try sema.checkPtrType(block, ptr_src, ptr_ty); 10002 10003 // TODO compile error if the result pointer is comptime known and would have an 10004 // alignment that disagrees with the Decl's alignment. 
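    // Note: for now the cast just rebuilds the operand's pointer type with the
    // requested alignment and coerces to it; no runtime alignment check is
    // emitted yet (see the TODO below).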
10005 10006 // TODO insert safety check that the alignment is correct 10007 10008 const ptr_info = ptr_ty.ptrInfo().data; 10009 const dest_ty = try Type.ptr(sema.arena, .{ 10010 .pointee_type = ptr_info.pointee_type, 10011 .@"align" = dest_align, 10012 .@"addrspace" = ptr_info.@"addrspace", 10013 .mutable = ptr_info.mutable, 10014 .@"allowzero" = ptr_info.@"allowzero", 10015 .@"volatile" = ptr_info.@"volatile", 10016 .size = ptr_info.size, 10017 }); 10018 return sema.coerceCompatiblePtrs(block, dest_ty, ptr, ptr_src); 10019} 10020 10021fn zirClz(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10022 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 10023 const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10024 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10025 const operand = sema.resolveInst(inst_data.operand); 10026 const operand_ty = sema.typeOf(operand); 10027 // TODO implement support for vectors 10028 if (operand_ty.zigTypeTag() != .Int) { 10029 return sema.fail(block, ty_src, "expected integer type, found '{}'", .{ 10030 operand_ty, 10031 }); 10032 } 10033 const target = sema.mod.getTarget(); 10034 const bits = operand_ty.intInfo(target).bits; 10035 if (bits == 0) return Air.Inst.Ref.zero; 10036 10037 const result_ty = try Type.smallestUnsignedInt(sema.arena, bits); 10038 10039 const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { 10040 if (val.isUndef()) return sema.addConstUndef(result_ty); 10041 return sema.addIntUnsigned(result_ty, val.clz(operand_ty, target)); 10042 } else operand_src; 10043 10044 try sema.requireRuntimeBlock(block, runtime_src); 10045 return block.addTyOp(.clz, result_ty, operand); 10046} 10047 10048fn zirCtz(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10049 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 10050 const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10051 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10052 const operand = sema.resolveInst(inst_data.operand); 10053 const operand_ty = sema.typeOf(operand); 10054 // TODO implement support for vectors 10055 if (operand_ty.zigTypeTag() != .Int) { 10056 return sema.fail(block, ty_src, "expected integer type, found '{}'", .{ 10057 operand_ty, 10058 }); 10059 } 10060 const target = sema.mod.getTarget(); 10061 const bits = operand_ty.intInfo(target).bits; 10062 if (bits == 0) return Air.Inst.Ref.zero; 10063 10064 const result_ty = try Type.smallestUnsignedInt(sema.arena, bits); 10065 10066 const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { 10067 if (val.isUndef()) return sema.addConstUndef(result_ty); 10068 return sema.fail(block, operand_src, "TODO: implement comptime @ctz", .{}); 10069 } else operand_src; 10070 10071 try sema.requireRuntimeBlock(block, runtime_src); 10072 return block.addTyOp(.ctz, result_ty, operand); 10073} 10074 10075fn zirPopCount(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10076 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 10077 const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10078 const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10079 const operand = sema.resolveInst(inst_data.operand); 10080 const operand_ty = 
sema.typeOf(operand); 10081 // TODO implement support for vectors 10082 if (operand_ty.zigTypeTag() != .Int) { 10083 return sema.fail(block, ty_src, "expected integer type, found '{}'", .{ 10084 operand_ty, 10085 }); 10086 } 10087 const target = sema.mod.getTarget(); 10088 const bits = operand_ty.intInfo(target).bits; 10089 if (bits == 0) return Air.Inst.Ref.zero; 10090 10091 const result_ty = try Type.smallestUnsignedInt(sema.arena, bits); 10092 10093 const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { 10094 if (val.isUndef()) return sema.addConstUndef(result_ty); 10095 const result_val = try val.popCount(operand_ty, target, sema.arena); 10096 return sema.addConstant(result_ty, result_val); 10097 } else operand_src; 10098 10099 try sema.requireRuntimeBlock(block, runtime_src); 10100 return block.addTyOp(.popcount, result_ty, operand); 10101} 10102 10103fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10104 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 10105 const src = inst_data.src(); 10106 return sema.fail(block, src, "TODO: Sema.zirByteSwap", .{}); 10107} 10108 10109fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10110 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 10111 const src = inst_data.src(); 10112 return sema.fail(block, src, "TODO: Sema.zirBitReverse", .{}); 10113} 10114 10115fn zirShrExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10116 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10117 const src = inst_data.src(); 10118 return sema.fail(block, src, "TODO: Sema.zirShrExact", .{}); 10119} 10120 10121fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10122 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10123 const src = inst_data.src(); 10124 return sema.fail(block, src, "TODO: Sema.zirBitOffsetOf", .{}); 10125} 10126 10127fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10128 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10129 const src = inst_data.src(); 10130 return sema.fail(block, src, "TODO: Sema.zirOffsetOf", .{}); 10131} 10132 10133/// Returns `true` if the type was a comptime_int. 
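/// Callers use the result to special-case `comptime_int`: for example, `zirTruncate`
/// simply coerces the operand when the destination is a `comptime_int`, since there
/// is no fixed bit width to truncate to.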
10134fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { 10135 switch (ty.zigTypeTag()) { 10136 .ComptimeInt => return true, 10137 .Int => return false, 10138 else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty}), 10139 } 10140} 10141 10142fn checkPtrType( 10143 sema: *Sema, 10144 block: *Block, 10145 ty_src: LazySrcLoc, 10146 ty: Type, 10147) CompileError!void { 10148 switch (ty.zigTypeTag()) { 10149 .Pointer => {}, 10150 else => return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty}), 10151 } 10152} 10153 10154fn checkFloatType( 10155 sema: *Sema, 10156 block: *Block, 10157 ty_src: LazySrcLoc, 10158 ty: Type, 10159) CompileError!void { 10160 switch (ty.zigTypeTag()) { 10161 .ComptimeFloat, .Float => {}, 10162 else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty}), 10163 } 10164} 10165 10166fn checkNumericType( 10167 sema: *Sema, 10168 block: *Block, 10169 ty_src: LazySrcLoc, 10170 ty: Type, 10171) CompileError!void { 10172 switch (ty.zigTypeTag()) { 10173 .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, 10174 .Vector => switch (ty.childType().zigTypeTag()) { 10175 .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, 10176 else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), 10177 }, 10178 else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty}), 10179 } 10180} 10181 10182fn checkAtomicOperandType( 10183 sema: *Sema, 10184 block: *Block, 10185 ty_src: LazySrcLoc, 10186 ty: Type, 10187) CompileError!void { 10188 var buffer: Type.Payload.Bits = undefined; 10189 const target = sema.mod.getTarget(); 10190 const max_atomic_bits = target_util.largestAtomicBits(target); 10191 const int_ty = switch (ty.zigTypeTag()) { 10192 .Int => ty, 10193 .Enum => ty.intTagType(&buffer), 10194 .Float => { 10195 const bit_count = ty.floatBits(target); 10196 if (bit_count > max_atomic_bits) { 10197 return sema.fail( 10198 block, 10199 ty_src, 10200 "expected {d}-bit float type or smaller; found {d}-bit float type", 10201 .{ max_atomic_bits, bit_count }, 10202 ); 10203 } 10204 return; 10205 }, 10206 .Bool => return, // Will be treated as `u8`. 
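        // Runtime pointers are also accepted; they are handled in the `else`
        // branch below, and every other type is rejected with a compile error.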
10207 else => { 10208 if (ty.isPtrAtRuntime()) return; 10209 10210 return sema.fail( 10211 block, 10212 ty_src, 10213 "expected bool, integer, float, enum, or pointer type; found {}", 10214 .{ty}, 10215 ); 10216 }, 10217 }; 10218 const bit_count = int_ty.intInfo(target).bits; 10219 if (bit_count > max_atomic_bits) { 10220 return sema.fail( 10221 block, 10222 ty_src, 10223 "expected {d}-bit integer type or smaller; found {d}-bit integer type", 10224 .{ max_atomic_bits, bit_count }, 10225 ); 10226 } 10227} 10228 10229fn checkPtrIsNotComptimeMutable( 10230 sema: *Sema, 10231 block: *Block, 10232 ptr_val: Value, 10233 ptr_src: LazySrcLoc, 10234 operand_src: LazySrcLoc, 10235) CompileError!void { 10236 _ = operand_src; 10237 if (ptr_val.isComptimeMutablePtr()) { 10238 return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{}); 10239 } 10240} 10241 10242fn checkComptimeVarStore( 10243 sema: *Sema, 10244 block: *Block, 10245 src: LazySrcLoc, 10246 decl_ref_mut: Value.Payload.DeclRefMut.Data, 10247) CompileError!void { 10248 if (decl_ref_mut.runtime_index < block.runtime_index) { 10249 if (block.runtime_cond) |cond_src| { 10250 const msg = msg: { 10251 const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{}); 10252 errdefer msg.destroy(sema.gpa); 10253 try sema.errNote(block, cond_src, msg, "runtime condition here", .{}); 10254 break :msg msg; 10255 }; 10256 return sema.failWithOwnedErrorMsg(msg); 10257 } 10258 if (block.runtime_loop) |loop_src| { 10259 const msg = msg: { 10260 const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{}); 10261 errdefer msg.destroy(sema.gpa); 10262 try sema.errNote(block, loop_src, msg, "non-inline loop here", .{}); 10263 break :msg msg; 10264 }; 10265 return sema.failWithOwnedErrorMsg(msg); 10266 } 10267 unreachable; 10268 } 10269} 10270 10271const SimdBinOp = struct { 10272 len: ?usize, 10273 /// Coerced to `result_ty`. 10274 lhs: Air.Inst.Ref, 10275 /// Coerced to `result_ty`. 10276 rhs: Air.Inst.Ref, 10277 lhs_val: ?Value, 10278 rhs_val: ?Value, 10279 /// Only different than `scalar_ty` when it is a vector operation. 
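    /// For example, `@maximum` of two `@Vector(4, f32)` operands has
    /// `result_ty = @Vector(4, f32)` and `scalar_ty = f32`.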
10280 result_ty: Type, 10281 scalar_ty: Type, 10282}; 10283 10284fn checkSimdBinOp( 10285 sema: *Sema, 10286 block: *Block, 10287 src: LazySrcLoc, 10288 uncasted_lhs: Air.Inst.Ref, 10289 uncasted_rhs: Air.Inst.Ref, 10290 lhs_src: LazySrcLoc, 10291 rhs_src: LazySrcLoc, 10292) CompileError!SimdBinOp { 10293 const lhs_ty = sema.typeOf(uncasted_lhs); 10294 const rhs_ty = sema.typeOf(uncasted_rhs); 10295 const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); 10296 const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); 10297 10298 var vec_len: ?usize = null; 10299 if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) { 10300 const lhs_len = lhs_ty.arrayLen(); 10301 const rhs_len = rhs_ty.arrayLen(); 10302 if (lhs_len != rhs_len) { 10303 const msg = msg: { 10304 const msg = try sema.errMsg(block, src, "vector length mismatch", .{}); 10305 errdefer msg.destroy(sema.gpa); 10306 try sema.errNote(block, lhs_src, msg, "length {d} here", .{lhs_len}); 10307 try sema.errNote(block, rhs_src, msg, "length {d} here", .{rhs_len}); 10308 break :msg msg; 10309 }; 10310 return sema.failWithOwnedErrorMsg(msg); 10311 } 10312 vec_len = try sema.usizeCast(block, lhs_src, lhs_len); 10313 } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) { 10314 const msg = msg: { 10315 const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: {} and {}", .{ 10316 lhs_ty, rhs_ty, 10317 }); 10318 errdefer msg.destroy(sema.gpa); 10319 if (lhs_zig_ty_tag == .Vector) { 10320 try sema.errNote(block, lhs_src, msg, "vector here", .{}); 10321 try sema.errNote(block, rhs_src, msg, "scalar here", .{}); 10322 } else { 10323 try sema.errNote(block, lhs_src, msg, "scalar here", .{}); 10324 try sema.errNote(block, rhs_src, msg, "vector here", .{}); 10325 } 10326 break :msg msg; 10327 }; 10328 return sema.failWithOwnedErrorMsg(msg); 10329 } 10330 const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{ 10331 .override = &[_]LazySrcLoc{ lhs_src, rhs_src }, 10332 }); 10333 const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src); 10334 const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src); 10335 10336 return SimdBinOp{ 10337 .len = vec_len, 10338 .lhs = lhs, 10339 .rhs = rhs, 10340 .lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs), 10341 .rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs), 10342 .result_ty = result_ty, 10343 .scalar_ty = result_ty.scalarType(), 10344 }; 10345} 10346 10347fn resolveExportOptions( 10348 sema: *Sema, 10349 block: *Block, 10350 src: LazySrcLoc, 10351 zir_ref: Zir.Inst.Ref, 10352) CompileError!std.builtin.ExportOptions { 10353 const export_options_ty = try sema.getBuiltinType(block, src, "ExportOptions"); 10354 const air_ref = sema.resolveInst(zir_ref); 10355 const coerced = try sema.coerce(block, export_options_ty, air_ref, src); 10356 const val = try sema.resolveConstValue(block, src, coerced); 10357 const fields = val.castTag(.@"struct").?.data; 10358 const struct_obj = export_options_ty.castTag(.@"struct").?.data; 10359 const name_index = struct_obj.fields.getIndex("name").?; 10360 const linkage_index = struct_obj.fields.getIndex("linkage").?; 10361 const section_index = struct_obj.fields.getIndex("section").?; 10362 if (!fields[section_index].isNull()) { 10363 return sema.fail(block, src, "TODO: implement exporting with linksection", .{}); 10364 } 10365 const name_ty = Type.initTag(.const_slice_u8); 10366 return std.builtin.ExportOptions{ 10367 .name = try 
fields[name_index].toAllocatedBytes(name_ty, sema.arena), 10368 .linkage = fields[linkage_index].toEnum(std.builtin.GlobalLinkage), 10369 .section = null, // TODO 10370 }; 10371} 10372 10373fn resolveAtomicOrder( 10374 sema: *Sema, 10375 block: *Block, 10376 src: LazySrcLoc, 10377 zir_ref: Zir.Inst.Ref, 10378) CompileError!std.builtin.AtomicOrder { 10379 const atomic_order_ty = try sema.getBuiltinType(block, src, "AtomicOrder"); 10380 const air_ref = sema.resolveInst(zir_ref); 10381 const coerced = try sema.coerce(block, atomic_order_ty, air_ref, src); 10382 const val = try sema.resolveConstValue(block, src, coerced); 10383 return val.toEnum(std.builtin.AtomicOrder); 10384} 10385 10386fn resolveAtomicRmwOp( 10387 sema: *Sema, 10388 block: *Block, 10389 src: LazySrcLoc, 10390 zir_ref: Zir.Inst.Ref, 10391) CompileError!std.builtin.AtomicRmwOp { 10392 const atomic_rmw_op_ty = try sema.getBuiltinType(block, src, "AtomicRmwOp"); 10393 const air_ref = sema.resolveInst(zir_ref); 10394 const coerced = try sema.coerce(block, atomic_rmw_op_ty, air_ref, src); 10395 const val = try sema.resolveConstValue(block, src, coerced); 10396 return val.toEnum(std.builtin.AtomicRmwOp); 10397} 10398 10399fn zirCmpxchg( 10400 sema: *Sema, 10401 block: *Block, 10402 inst: Zir.Inst.Index, 10403 air_tag: Air.Inst.Tag, 10404) CompileError!Air.Inst.Ref { 10405 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10406 const extra = sema.code.extraData(Zir.Inst.Cmpxchg, inst_data.payload_index).data; 10407 const src = inst_data.src(); 10408 // zig fmt: off 10409 const elem_ty_src : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10410 const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10411 const expected_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; 10412 const new_value_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node }; 10413 const success_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node }; 10414 const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = inst_data.src_node }; 10415 // zig fmt: on 10416 const ptr = sema.resolveInst(extra.ptr); 10417 const ptr_ty = sema.typeOf(ptr); 10418 const elem_ty = ptr_ty.elemType(); 10419 try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty); 10420 if (elem_ty.zigTypeTag() == .Float) { 10421 return sema.fail( 10422 block, 10423 elem_ty_src, 10424 "expected bool, integer, enum, or pointer type; found '{}'", 10425 .{elem_ty}, 10426 ); 10427 } 10428 const expected_value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.expected_value), expected_src); 10429 const new_value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.new_value), new_value_src); 10430 const success_order = try sema.resolveAtomicOrder(block, success_order_src, extra.success_order); 10431 const failure_order = try sema.resolveAtomicOrder(block, failure_order_src, extra.failure_order); 10432 10433 if (@enumToInt(success_order) < @enumToInt(std.builtin.AtomicOrder.Monotonic)) { 10434 return sema.fail(block, success_order_src, "success atomic ordering must be Monotonic or stricter", .{}); 10435 } 10436 if (@enumToInt(failure_order) < @enumToInt(std.builtin.AtomicOrder.Monotonic)) { 10437 return sema.fail(block, failure_order_src, "failure atomic ordering must be Monotonic or stricter", .{}); 10438 } 10439 if (@enumToInt(failure_order) > @enumToInt(success_order)) { 10440 return sema.fail(block, failure_order_src, "failure 
atomic ordering must be no stricter than success", .{}); 10441 } 10442 if (failure_order == .Release or failure_order == .AcqRel) { 10443 return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{}); 10444 } 10445 10446 const result_ty = try Type.optional(sema.arena, elem_ty); 10447 10448 // special case zero bit types 10449 if ((try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) != null) { 10450 return sema.addConstant(result_ty, Value.@"null"); 10451 } 10452 10453 const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { 10454 if (try sema.resolveMaybeUndefVal(block, expected_src, expected_value)) |expected_val| { 10455 if (try sema.resolveMaybeUndefVal(block, new_value_src, new_value)) |new_val| { 10456 if (expected_val.isUndef() or new_val.isUndef()) { 10457 // TODO: this should probably cause the memory stored at the pointer 10458 // to become undef as well 10459 return sema.addConstUndef(result_ty); 10460 } 10461 const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; 10462 const result_val = if (stored_val.eql(expected_val, elem_ty)) blk: { 10463 try sema.storePtr(block, src, ptr, new_value); 10464 break :blk Value.@"null"; 10465 } else try Value.Tag.opt_payload.create(sema.arena, stored_val); 10466 10467 return sema.addConstant(result_ty, result_val); 10468 } else break :rs new_value_src; 10469 } else break :rs expected_src; 10470 } else ptr_src; 10471 10472 const flags: u32 = @as(u32, @enumToInt(success_order)) | 10473 (@as(u32, @enumToInt(failure_order)) << 3); 10474 10475 try sema.requireRuntimeBlock(block, runtime_src); 10476 return block.addInst(.{ 10477 .tag = air_tag, 10478 .data = .{ .ty_pl = .{ 10479 .ty = try sema.addType(result_ty), 10480 .payload = try sema.addExtra(Air.Cmpxchg{ 10481 .ptr = ptr, 10482 .expected_value = expected_value, 10483 .new_value = new_value, 10484 .flags = flags, 10485 }), 10486 } }, 10487 }); 10488} 10489 10490fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10491 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10492 const src = inst_data.src(); 10493 return sema.fail(block, src, "TODO: Sema.zirSplat", .{}); 10494} 10495 10496fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10497 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10498 const src = inst_data.src(); 10499 return sema.fail(block, src, "TODO: Sema.zirReduce", .{}); 10500} 10501 10502fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10503 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10504 const src = inst_data.src(); 10505 return sema.fail(block, src, "TODO: Sema.zirShuffle", .{}); 10506} 10507 10508fn zirSelect(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10509 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10510 const src = inst_data.src(); 10511 return sema.fail(block, src, "TODO: Sema.zirSelect", .{}); 10512} 10513 10514fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10515 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10516 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 10517 // zig fmt: off 10518 const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10519 const ptr_src : 
LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10520 const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; 10521 // zig fmt: on 10522 const ptr = sema.resolveInst(extra.lhs); 10523 const ptr_ty = sema.typeOf(ptr); 10524 const elem_ty = ptr_ty.elemType(); 10525 try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty); 10526 const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs); 10527 10528 switch (order) { 10529 .Release, .AcqRel => { 10530 return sema.fail( 10531 block, 10532 order_src, 10533 "@atomicLoad atomic ordering must not be Release or AcqRel", 10534 .{}, 10535 ); 10536 }, 10537 else => {}, 10538 } 10539 10540 if (try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) |val| { 10541 return sema.addConstant(elem_ty, val); 10542 } 10543 10544 if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { 10545 if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| { 10546 return sema.addConstant(elem_ty, elem_val); 10547 } 10548 } 10549 10550 try sema.requireRuntimeBlock(block, ptr_src); 10551 return block.addInst(.{ 10552 .tag = .atomic_load, 10553 .data = .{ .atomic_load = .{ 10554 .ptr = ptr, 10555 .order = order, 10556 } }, 10557 }); 10558} 10559 10560fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10561 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10562 const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data; 10563 const src = inst_data.src(); 10564 // zig fmt: off 10565 const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10566 const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10567 const op_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; 10568 const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node }; 10569 const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node }; 10570 // zig fmt: on 10571 const ptr = sema.resolveInst(extra.ptr); 10572 const ptr_ty = sema.typeOf(ptr); 10573 const operand_ty = ptr_ty.elemType(); 10574 try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty); 10575 const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation); 10576 10577 switch (operand_ty.zigTypeTag()) { 10578 .Enum => if (op != .Xchg) { 10579 return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{}); 10580 }, 10581 .Bool => if (op != .Xchg) { 10582 return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{}); 10583 }, 10584 .Float => switch (op) { 10585 .Xchg, .Add, .Sub => {}, 10586 else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, and .Sub", .{}), 10587 }, 10588 else => {}, 10589 } 10590 const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src); 10591 const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering); 10592 10593 if (order == .Unordered) { 10594 return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be Unordered", .{}); 10595 } 10596 10597 // special case zero bit types 10598 if (try sema.typeHasOnePossibleValue(block, operand_ty_src, operand_ty)) |val| { 10599 return sema.addConstant(operand_ty, val); 10600 } 10601 10602 const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { 10603 const 
maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand); 10604 const operand_val = maybe_operand_val orelse { 10605 try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); 10606 break :rs operand_src; 10607 }; 10608 if (ptr_val.isComptimeMutablePtr()) { 10609 const target = sema.mod.getTarget(); 10610 const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; 10611 const new_val = switch (op) { 10612 // zig fmt: off 10613 .Xchg => operand_val, 10614 .Add => try stored_val.numberAddWrap(operand_val, operand_ty, sema.arena, target), 10615 .Sub => try stored_val.numberSubWrap(operand_val, operand_ty, sema.arena, target), 10616 .And => try stored_val.bitwiseAnd (operand_val, sema.arena), 10617 .Nand => try stored_val.bitwiseNand (operand_val, operand_ty, sema.arena, target), 10618 .Or => try stored_val.bitwiseOr (operand_val, sema.arena), 10619 .Xor => try stored_val.bitwiseXor (operand_val, sema.arena), 10620 .Max => try stored_val.numberMax (operand_val), 10621 .Min => try stored_val.numberMin (operand_val), 10622 // zig fmt: on 10623 }; 10624 try sema.storePtrVal(block, src, ptr_val, new_val, operand_ty); 10625 return sema.addConstant(operand_ty, stored_val); 10626 } else break :rs ptr_src; 10627 } else ptr_src; 10628 10629 const flags: u32 = @as(u32, @enumToInt(order)) | (@as(u32, @enumToInt(op)) << 3); 10630 10631 try sema.requireRuntimeBlock(block, runtime_src); 10632 return block.addInst(.{ 10633 .tag = .atomic_rmw, 10634 .data = .{ .pl_op = .{ 10635 .operand = ptr, 10636 .payload = try sema.addExtra(Air.AtomicRmw{ 10637 .operand = operand, 10638 .flags = flags, 10639 }), 10640 } }, 10641 }); 10642} 10643 10644fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 10645 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10646 const extra = sema.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data; 10647 const src = inst_data.src(); 10648 // zig fmt: off 10649 const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10650 const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10651 const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; 10652 const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node }; 10653 // zig fmt: on 10654 const ptr = sema.resolveInst(extra.ptr); 10655 const operand_ty = sema.typeOf(ptr).elemType(); 10656 try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty); 10657 const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src); 10658 const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering); 10659 10660 const air_tag: Air.Inst.Tag = switch (order) { 10661 .Acquire, .AcqRel => { 10662 return sema.fail( 10663 block, 10664 order_src, 10665 "@atomicStore atomic ordering must not be Acquire or AcqRel", 10666 .{}, 10667 ); 10668 }, 10669 .Unordered => .atomic_store_unordered, 10670 .Monotonic => .atomic_store_monotonic, 10671 .Release => .atomic_store_release, 10672 .SeqCst => .atomic_store_seq_cst, 10673 }; 10674 10675 return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag); 10676} 10677 10678fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10679 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10680 const src = inst_data.src(); 10681 return 
sema.fail(block, src, "TODO: Sema.zirMulAdd", .{}); 10682} 10683 10684fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10685 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10686 const src = inst_data.src(); 10687 return sema.fail(block, src, "TODO: Sema.zirBuiltinCall", .{}); 10688} 10689 10690fn zirFieldPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10691 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10692 const src = inst_data.src(); 10693 return sema.fail(block, src, "TODO: Sema.zirFieldPtrType", .{}); 10694} 10695 10696fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10697 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10698 const src = inst_data.src(); 10699 return sema.fail(block, src, "TODO: Sema.zirFieldParentPtr", .{}); 10700} 10701 10702fn zirMinMax( 10703 sema: *Sema, 10704 block: *Block, 10705 inst: Zir.Inst.Index, 10706 air_tag: Air.Inst.Tag, 10707) CompileError!Air.Inst.Ref { 10708 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10709 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; 10710 const src = inst_data.src(); 10711 const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10712 const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10713 const lhs = sema.resolveInst(extra.lhs); 10714 const rhs = sema.resolveInst(extra.rhs); 10715 try sema.checkNumericType(block, lhs_src, sema.typeOf(lhs)); 10716 try sema.checkNumericType(block, rhs_src, sema.typeOf(rhs)); 10717 const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src); 10718 10719 // TODO @maximum(max_int, undefined) should return max_int 10720 10721 const runtime_src = if (simd_op.lhs_val) |lhs_val| rs: { 10722 if (lhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); 10723 10724 const rhs_val = simd_op.rhs_val orelse break :rs rhs_src; 10725 10726 if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); 10727 10728 const opFunc = switch (air_tag) { 10729 .min => Value.numberMin, 10730 .max => Value.numberMax, 10731 else => unreachable, 10732 }; 10733 const vec_len = simd_op.len orelse { 10734 const result_val = try opFunc(lhs_val, rhs_val); 10735 return sema.addConstant(simd_op.result_ty, result_val); 10736 }; 10737 var lhs_buf: Value.ElemValueBuffer = undefined; 10738 var rhs_buf: Value.ElemValueBuffer = undefined; 10739 const elems = try sema.arena.alloc(Value, vec_len); 10740 for (elems) |*elem, i| { 10741 const lhs_elem_val = lhs_val.elemValueBuffer(i, &lhs_buf); 10742 const rhs_elem_val = rhs_val.elemValueBuffer(i, &rhs_buf); 10743 elem.* = try opFunc(lhs_elem_val, rhs_elem_val); 10744 } 10745 return sema.addConstant( 10746 simd_op.result_ty, 10747 try Value.Tag.array.create(sema.arena, elems), 10748 ); 10749 } else rs: { 10750 if (simd_op.rhs_val) |rhs_val| { 10751 if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); 10752 } 10753 break :rs lhs_src; 10754 }; 10755 10756 try sema.requireRuntimeBlock(block, runtime_src); 10757 return block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs); 10758} 10759 10760fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 10761 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10762 const extra = sema.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data; 
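    // Rough shape of the lowering below: require a mutable destination pointer,
    // coerce the source to a const many-item pointer of the destination's element
    // type and the length to `usize`, then emit a `memcpy` AIR instruction (the
    // fully comptime case is still a TODO).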
10763 const src = inst_data.src(); 10764 const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10765 const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10766 const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; 10767 const dest_ptr = sema.resolveInst(extra.dest); 10768 const dest_ptr_ty = sema.typeOf(dest_ptr); 10769 10770 if (dest_ptr_ty.zigTypeTag() != .Pointer) { 10771 return sema.fail(block, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty}); 10772 } 10773 if (dest_ptr_ty.isConstPtr()) { 10774 return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty}); 10775 } 10776 10777 const uncasted_src_ptr = sema.resolveInst(extra.source); 10778 const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr); 10779 if (uncasted_src_ptr_ty.zigTypeTag() != .Pointer) { 10780 return sema.fail(block, src_src, "expected pointer, found '{}'", .{ 10781 uncasted_src_ptr_ty, 10782 }); 10783 } 10784 const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data; 10785 const wanted_src_ptr_ty = try Type.ptr(sema.arena, .{ 10786 .pointee_type = dest_ptr_ty.elemType2(), 10787 .@"align" = src_ptr_info.@"align", 10788 .@"addrspace" = src_ptr_info.@"addrspace", 10789 .mutable = false, 10790 .@"allowzero" = src_ptr_info.@"allowzero", 10791 .@"volatile" = src_ptr_info.@"volatile", 10792 .size = .Many, 10793 }); 10794 const src_ptr = try sema.coerce(block, wanted_src_ptr_ty, uncasted_src_ptr, src_src); 10795 const len = try sema.coerce(block, Type.usize, sema.resolveInst(extra.byte_count), len_src); 10796 10797 const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr); 10798 const maybe_src_ptr_val = try sema.resolveDefinedValue(block, src_src, src_ptr); 10799 const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len); 10800 10801 const runtime_src = if (maybe_dest_ptr_val) |dest_ptr_val| rs: { 10802 if (maybe_src_ptr_val) |src_ptr_val| { 10803 if (maybe_len_val) |len_val| { 10804 _ = dest_ptr_val; 10805 _ = src_ptr_val; 10806 _ = len_val; 10807 return sema.fail(block, src, "TODO: Sema.zirMemcpy at comptime", .{}); 10808 } else break :rs len_src; 10809 } else break :rs src_src; 10810 } else dest_src; 10811 10812 try sema.requireRuntimeBlock(block, runtime_src); 10813 _ = try block.addInst(.{ 10814 .tag = .memcpy, 10815 .data = .{ .pl_op = .{ 10816 .operand = dest_ptr, 10817 .payload = try sema.addExtra(Air.Bin{ 10818 .lhs = src_ptr, 10819 .rhs = len, 10820 }), 10821 } }, 10822 }); 10823} 10824 10825fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { 10826 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10827 const extra = sema.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data; 10828 const src = inst_data.src(); 10829 const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; 10830 const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; 10831 const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; 10832 const dest_ptr = sema.resolveInst(extra.dest); 10833 const dest_ptr_ty = sema.typeOf(dest_ptr); 10834 if (dest_ptr_ty.zigTypeTag() != .Pointer) { 10835 return sema.fail(block, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty}); 10836 } 10837 if (dest_ptr_ty.isConstPtr()) { 10838 return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty}); 10839 } 10840 const 
elem_ty = dest_ptr_ty.elemType2(); 10841 const value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.byte), value_src); 10842 const len = try sema.coerce(block, Type.usize, sema.resolveInst(extra.byte_count), len_src); 10843 10844 const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr); 10845 const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len); 10846 10847 const runtime_src = if (maybe_dest_ptr_val) |ptr_val| rs: { 10848 if (maybe_len_val) |len_val| { 10849 if (try sema.resolveMaybeUndefVal(block, value_src, value)) |val| { 10850 _ = ptr_val; 10851 _ = len_val; 10852 _ = val; 10853 return sema.fail(block, src, "TODO: Sema.zirMemset at comptime", .{}); 10854 } else break :rs value_src; 10855 } else break :rs len_src; 10856 } else dest_src; 10857 10858 try sema.requireRuntimeBlock(block, runtime_src); 10859 _ = try block.addInst(.{ 10860 .tag = .memset, 10861 .data = .{ .pl_op = .{ 10862 .operand = dest_ptr, 10863 .payload = try sema.addExtra(Air.Bin{ 10864 .lhs = value, 10865 .rhs = len, 10866 }), 10867 } }, 10868 }); 10869} 10870 10871fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10872 const inst_data = sema.code.instructions.items(.data)[inst].pl_node; 10873 const src = inst_data.src(); 10874 return sema.fail(block, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); 10875} 10876 10877fn zirResume(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { 10878 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 10879 const src = inst_data.src(); 10880 return sema.fail(block, src, "TODO: Sema.zirResume", .{}); 10881} 10882 10883fn zirAwait( 10884 sema: *Sema, 10885 block: *Block, 10886 inst: Zir.Inst.Index, 10887 is_nosuspend: bool, 10888) CompileError!Air.Inst.Ref { 10889 const inst_data = sema.code.instructions.items(.data)[inst].un_node; 10890 const src = inst_data.src(); 10891 10892 _ = is_nosuspend; 10893 return sema.fail(block, src, "TODO: Sema.zirAwait", .{}); 10894} 10895 10896fn zirVarExtended( 10897 sema: *Sema, 10898 block: *Block, 10899 extended: Zir.Inst.Extended.InstData, 10900) CompileError!Air.Inst.Ref { 10901 const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); 10902 const src = sema.src; 10903 const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type 10904 const mut_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at mut token 10905 const init_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at init expr 10906 const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small); 10907 10908 var extra_index: usize = extra.end; 10909 10910 const lib_name: ?[]const u8 = if (small.has_lib_name) blk: { 10911 const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]); 10912 extra_index += 1; 10913 break :blk lib_name; 10914 } else null; 10915 10916 // ZIR supports encoding this information but it is not used; the information 10917 // is encoded via the Decl entry. 
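    // The trailing operands that follow `extra.end` are consumed in order by
    // advancing `extra_index`: `lib_name` (decoded above), then `align`
    // (asserted absent just below), then the optional init expression.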
10918 assert(!small.has_align); 10919 //const align_val: Value = if (small.has_align) blk: { 10920 // const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 10921 // extra_index += 1; 10922 // const align_tv = try sema.resolveInstConst(block, align_src, align_ref); 10923 // break :blk align_tv.val; 10924 //} else Value.@"null"; 10925 10926 const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: { 10927 const init_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 10928 extra_index += 1; 10929 break :blk sema.resolveInst(init_ref); 10930 } else .none; 10931 10932 const have_ty = extra.data.var_type != .none; 10933 const var_ty = if (have_ty) 10934 try sema.resolveType(block, ty_src, extra.data.var_type) 10935 else 10936 sema.typeOf(uncasted_init); 10937 10938 const init_val = if (uncasted_init != .none) blk: { 10939 const init = if (have_ty) 10940 try sema.coerce(block, var_ty, uncasted_init, init_src) 10941 else 10942 uncasted_init; 10943 10944 break :blk (try sema.resolveMaybeUndefVal(block, init_src, init)) orelse 10945 return sema.failWithNeededComptime(block, init_src); 10946 } else Value.initTag(.unreachable_value); 10947 10948 try sema.validateVarType(block, mut_src, var_ty, small.is_extern); 10949 10950 if (lib_name != null) { 10951 // Look at the sema code for functions which has this logic, it just needs to 10952 // be extracted and shared by both var and func 10953 return sema.fail(block, src, "TODO: handle var with lib_name in Sema", .{}); 10954 } 10955 10956 const new_var = try sema.gpa.create(Module.Var); 10957 10958 log.debug("created variable {*} owner_decl: {*} ({s})", .{ 10959 new_var, sema.owner_decl, sema.owner_decl.name, 10960 }); 10961 10962 new_var.* = .{ 10963 .owner_decl = sema.owner_decl, 10964 .init = init_val, 10965 .is_extern = small.is_extern, 10966 .is_mutable = true, // TODO get rid of this unused field 10967 .is_threadlocal = small.is_threadlocal, 10968 }; 10969 const result = try sema.addConstant( 10970 var_ty, 10971 try Value.Tag.variable.create(sema.arena, new_var), 10972 ); 10973 return result; 10974} 10975 10976fn zirFuncExtended( 10977 sema: *Sema, 10978 block: *Block, 10979 extended: Zir.Inst.Extended.InstData, 10980 inst: Zir.Inst.Index, 10981) CompileError!Air.Inst.Ref { 10982 const tracy = trace(@src()); 10983 defer tracy.end(); 10984 10985 const extra = sema.code.extraData(Zir.Inst.ExtendedFunc, extended.operand); 10986 const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; 10987 const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = extra.data.src_node }; 10988 const align_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at align 10989 const small = @bitCast(Zir.Inst.ExtendedFunc.Small, extended.small); 10990 10991 var extra_index: usize = extra.end; 10992 10993 const lib_name: ?[]const u8 = if (small.has_lib_name) blk: { 10994 const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]); 10995 extra_index += 1; 10996 break :blk lib_name; 10997 } else null; 10998 10999 const cc: std.builtin.CallingConvention = if (small.has_cc) blk: { 11000 const cc_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 11001 extra_index += 1; 11002 const cc_tv = try sema.resolveInstConst(block, cc_src, cc_ref); 11003 break :blk cc_tv.val.toEnum(std.builtin.CallingConvention); 11004 } else .Unspecified; 11005 11006 const align_val: Value = if (small.has_align) blk: { 11007 const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); 11008 extra_index += 1; 11009 const 
align_tv = try sema.resolveInstConst(block, align_src, align_ref);
        break :blk align_tv.val;
    } else Value.@"null";

    const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
    extra_index += ret_ty_body.len;

    var body_inst: Zir.Inst.Index = 0;
    var src_locs: Zir.Inst.Func.SrcLocs = undefined;
    if (extra.data.body_len != 0) {
        body_inst = inst;
        extra_index += extra.data.body_len;
        src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
    }

    const is_var_args = small.is_var_args;
    const is_inferred_error = small.is_inferred_error;
    const is_extern = small.is_extern;

    return sema.funcCommon(
        block,
        extra.data.src_node,
        body_inst,
        ret_ty_body,
        cc,
        align_val,
        is_var_args,
        is_inferred_error,
        is_extern,
        src_locs,
        lib_name,
    );
}

fn zirCUndef(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };

    const name = try sema.resolveConstString(block, src, extra.operand);
    try block.c_import_buf.?.writer().print("#undef {s}\n", .{name});
    return Air.Inst.Ref.void_value;
}

fn zirCInclude(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };

    const name = try sema.resolveConstString(block, src, extra.operand);
    try block.c_import_buf.?.writer().print("#include <{s}>\n", .{name});
    return Air.Inst.Ref.void_value;
}

fn zirCDefine(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };

    const name = try sema.resolveConstString(block, src, extra.lhs);
    const rhs = sema.resolveInst(extra.rhs);
    if (sema.typeOf(rhs).zigTypeTag() != .Void) {
        const value = try sema.resolveConstString(block, src, extra.rhs);
        try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value });
    } else {
        try block.c_import_buf.?.writer().print("#define {s}\n", .{name});
    }
    return Air.Inst.Ref.void_value;
}

fn zirWasmMemorySize(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };
    return sema.fail(block, src, "TODO: implement Sema.zirWasmMemorySize", .{});
}

fn zirWasmMemoryGrow(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };
    return sema.fail(block, src, "TODO: implement Sema.zirWasmMemoryGrow", .{});
}

fn zirPrefetch(
    sema: *Sema,
    block: *Block,
    extended:
Zir.Inst.Extended.InstData, 11112) CompileError!Air.Inst.Ref { 11113 const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; 11114 const src: LazySrcLoc = .{ .node_offset = extra.node }; 11115 return sema.fail(block, src, "TODO: implement Sema.zirPrefetch", .{}); 11116} 11117 11118fn zirBuiltinExtern( 11119 sema: *Sema, 11120 block: *Block, 11121 extended: Zir.Inst.Extended.InstData, 11122) CompileError!Air.Inst.Ref { 11123 const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; 11124 const src: LazySrcLoc = .{ .node_offset = extra.node }; 11125 return sema.fail(block, src, "TODO: implement Sema.zirBuiltinExtern", .{}); 11126} 11127 11128fn requireFunctionBlock(sema: *Sema, block: *Block, src: LazySrcLoc) !void { 11129 if (sema.func == null) { 11130 return sema.fail(block, src, "instruction illegal outside function body", .{}); 11131 } 11132} 11133 11134fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc) !void { 11135 if (block.is_comptime) { 11136 return sema.failWithNeededComptime(block, src); 11137 } 11138 try sema.requireFunctionBlock(block, src); 11139} 11140 11141/// Emit a compile error if type cannot be used for a runtime variable. 11142fn validateVarType( 11143 sema: *Sema, 11144 block: *Block, 11145 src: LazySrcLoc, 11146 var_ty: Type, 11147 is_extern: bool, 11148) CompileError!void { 11149 var ty = var_ty; 11150 while (true) switch (ty.zigTypeTag()) { 11151 .Bool, 11152 .Int, 11153 .Float, 11154 .ErrorSet, 11155 .Enum, 11156 .Frame, 11157 .AnyFrame, 11158 .Void, 11159 => return, 11160 11161 .BoundFn, 11162 .ComptimeFloat, 11163 .ComptimeInt, 11164 .EnumLiteral, 11165 .NoReturn, 11166 .Type, 11167 .Undefined, 11168 .Null, 11169 => break, 11170 11171 .Pointer => { 11172 const elem_ty = ty.childType(); 11173 if (elem_ty.zigTypeTag() == .Opaque) return; 11174 ty = elem_ty; 11175 }, 11176 .Opaque => if (is_extern) return else break, 11177 11178 .Optional => { 11179 var buf: Type.Payload.ElemType = undefined; 11180 const child_ty = ty.optionalChild(&buf); 11181 return validateVarType(sema, block, src, child_ty, is_extern); 11182 }, 11183 .Array, .Vector => ty = ty.elemType(), 11184 11185 .ErrorUnion => ty = ty.errorUnionPayload(), 11186 11187 .Fn, .Struct, .Union => { 11188 const resolved_ty = try sema.resolveTypeFields(block, src, ty); 11189 if (resolved_ty.requiresComptime()) { 11190 break; 11191 } else { 11192 return; 11193 } 11194 }, 11195 } else unreachable; // TODO should not need else unreachable 11196 11197 return sema.fail(block, src, "variable of type '{}' must be const or comptime", .{var_ty}); 11198} 11199 11200pub const PanicId = enum { 11201 unreach, 11202 unwrap_null, 11203 unwrap_errunion, 11204 cast_to_null, 11205 incorrect_alignment, 11206 invalid_error_code, 11207}; 11208 11209fn addSafetyCheck( 11210 sema: *Sema, 11211 parent_block: *Block, 11212 ok: Air.Inst.Ref, 11213 panic_id: PanicId, 11214) !void { 11215 const gpa = sema.gpa; 11216 11217 var fail_block: Block = .{ 11218 .parent = parent_block, 11219 .sema = sema, 11220 .src_decl = parent_block.src_decl, 11221 .namespace = parent_block.namespace, 11222 .wip_capture_scope = parent_block.wip_capture_scope, 11223 .instructions = .{}, 11224 .inlining = parent_block.inlining, 11225 .is_comptime = parent_block.is_comptime, 11226 }; 11227 11228 defer fail_block.instructions.deinit(gpa); 11229 11230 _ = try sema.safetyPanic(&fail_block, .unneeded, panic_id); 11231 11232 try parent_block.instructions.ensureUnusedCapacity(gpa, 1); 11233 11234 try 
sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + 11235 1 + // The main block only needs space for the cond_br. 11236 @typeInfo(Air.CondBr).Struct.fields.len + 11237 1 + // The ok branch of the cond_br only needs space for the br. 11238 fail_block.instructions.items.len); 11239 11240 try sema.air_instructions.ensureUnusedCapacity(gpa, 3); 11241 const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); 11242 const cond_br_inst = block_inst + 1; 11243 const br_inst = cond_br_inst + 1; 11244 sema.air_instructions.appendAssumeCapacity(.{ 11245 .tag = .block, 11246 .data = .{ .ty_pl = .{ 11247 .ty = .void_type, 11248 .payload = sema.addExtraAssumeCapacity(Air.Block{ 11249 .body_len = 1, 11250 }), 11251 } }, 11252 }); 11253 sema.air_extra.appendAssumeCapacity(cond_br_inst); 11254 11255 sema.air_instructions.appendAssumeCapacity(.{ 11256 .tag = .cond_br, 11257 .data = .{ .pl_op = .{ 11258 .operand = ok, 11259 .payload = sema.addExtraAssumeCapacity(Air.CondBr{ 11260 .then_body_len = 1, 11261 .else_body_len = @intCast(u32, fail_block.instructions.items.len), 11262 }), 11263 } }, 11264 }); 11265 sema.air_extra.appendAssumeCapacity(br_inst); 11266 sema.air_extra.appendSliceAssumeCapacity(fail_block.instructions.items); 11267 11268 sema.air_instructions.appendAssumeCapacity(.{ 11269 .tag = .br, 11270 .data = .{ .br = .{ 11271 .block_inst = block_inst, 11272 .operand = .void_value, 11273 } }, 11274 }); 11275 11276 parent_block.instructions.appendAssumeCapacity(block_inst); 11277} 11278 11279fn panicWithMsg( 11280 sema: *Sema, 11281 block: *Block, 11282 src: LazySrcLoc, 11283 msg_inst: Air.Inst.Ref, 11284) !Zir.Inst.Index { 11285 const mod = sema.mod; 11286 const arena = sema.arena; 11287 11288 const this_feature_is_implemented_in_the_backend = 11289 mod.comp.bin_file.options.object_format == .c or 11290 mod.comp.bin_file.options.use_llvm; 11291 if (!this_feature_is_implemented_in_the_backend) { 11292 // TODO implement this feature in all the backends and then delete this branch 11293 _ = try block.addNoOp(.breakpoint); 11294 _ = try block.addNoOp(.unreach); 11295 return always_noreturn; 11296 } 11297 const panic_fn = try sema.getBuiltin(block, src, "panic"); 11298 const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); 11299 const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); 11300 const ptr_stack_trace_ty = try Type.ptr(arena, .{ 11301 .pointee_type = stack_trace_ty, 11302 .@"addrspace" = target_util.defaultAddressSpace(mod.getTarget(), .global_constant), // TODO might need a place that is more dynamic 11303 }); 11304 const null_stack_trace = try sema.addConstant( 11305 try Type.optional(arena, ptr_stack_trace_ty), 11306 Value.@"null", 11307 ); 11308 const args = try arena.create([2]Air.Inst.Ref); 11309 args.* = .{ msg_inst, null_stack_trace }; 11310 _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args); 11311 return always_noreturn; 11312} 11313 11314fn safetyPanic( 11315 sema: *Sema, 11316 block: *Block, 11317 src: LazySrcLoc, 11318 panic_id: PanicId, 11319) CompileError!Zir.Inst.Index { 11320 const msg = switch (panic_id) { 11321 .unreach => "reached unreachable code", 11322 .unwrap_null => "attempt to use null value", 11323 .unwrap_errunion => "unreachable error occurred", 11324 .cast_to_null => "cast causes pointer to be null", 11325 .incorrect_alignment => "incorrect alignment", 11326 .invalid_error_code => "invalid error code", 11327 }; 11328 11329 const msg_inst = 
msg_inst: { 11330 // TODO instead of making a new decl for every panic in the entire compilation, 11331 // introduce the concept of a reference-counted decl for these 11332 var anon_decl = try block.startAnonDecl(); 11333 defer anon_decl.deinit(); 11334 break :msg_inst try sema.analyzeDeclRef(try anon_decl.finish( 11335 try Type.Tag.array_u8.create(anon_decl.arena(), msg.len), 11336 try Value.Tag.bytes.create(anon_decl.arena(), msg), 11337 )); 11338 }; 11339 11340 const casted_msg_inst = try sema.coerce(block, Type.initTag(.const_slice_u8), msg_inst, src); 11341 return sema.panicWithMsg(block, src, casted_msg_inst); 11342} 11343 11344fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void { 11345 sema.branch_count += 1; 11346 if (sema.branch_count > sema.branch_quota) { 11347 // TODO show the "called from here" stack 11348 return sema.fail(block, src, "evaluation exceeded {d} backwards branches", .{sema.branch_quota}); 11349 } 11350} 11351 11352fn fieldVal( 11353 sema: *Sema, 11354 block: *Block, 11355 src: LazySrcLoc, 11356 object: Air.Inst.Ref, 11357 field_name: []const u8, 11358 field_name_src: LazySrcLoc, 11359) CompileError!Air.Inst.Ref { 11360 // When editing this function, note that there is corresponding logic to be edited 11361 // in `fieldPtr`. This function takes a value and returns a value. 11362 11363 const arena = sema.arena; 11364 const object_src = src; // TODO better source location 11365 const object_ty = sema.typeOf(object); 11366 11367 // Zig allows dereferencing a single pointer during field lookup. Note that 11368 // we don't actually need to generate the dereference some field lookups, like the 11369 // length of arrays and other comptime operations. 11370 const is_pointer_to = object_ty.isSinglePointer(); 11371 11372 const inner_ty = if (is_pointer_to) 11373 object_ty.childType() 11374 else 11375 object_ty; 11376 11377 switch (inner_ty.zigTypeTag()) { 11378 .Array => { 11379 if (mem.eql(u8, field_name, "len")) { 11380 return sema.addConstant( 11381 Type.initTag(.comptime_int), 11382 try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()), 11383 ); 11384 } else { 11385 return sema.fail( 11386 block, 11387 field_name_src, 11388 "no member named '{s}' in '{}'", 11389 .{ field_name, object_ty }, 11390 ); 11391 } 11392 }, 11393 .Pointer => if (inner_ty.isSlice()) { 11394 if (mem.eql(u8, field_name, "ptr")) { 11395 const slice = if (is_pointer_to) 11396 try sema.analyzeLoad(block, src, object, object_src) 11397 else 11398 object; 11399 return sema.analyzeSlicePtr(block, src, slice, inner_ty, object_src); 11400 } else if (mem.eql(u8, field_name, "len")) { 11401 const slice = if (is_pointer_to) 11402 try sema.analyzeLoad(block, src, object, object_src) 11403 else 11404 object; 11405 return sema.analyzeSliceLen(block, src, slice); 11406 } else { 11407 return sema.fail( 11408 block, 11409 field_name_src, 11410 "no member named '{s}' in '{}'", 11411 .{ field_name, object_ty }, 11412 ); 11413 } 11414 }, 11415 .Type => { 11416 const dereffed_type = if (is_pointer_to) 11417 try sema.analyzeLoad(block, src, object, object_src) 11418 else 11419 object; 11420 11421 const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?; 11422 var to_type_buffer: Value.ToTypeBuffer = undefined; 11423 const child_type = val.toType(&to_type_buffer); 11424 11425 switch (child_type.zigTypeTag()) { 11426 .ErrorSet => { 11427 const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { 11428 const error_set = payload.data; 11429 // TODO this is 
O(N). I'm putting off solving this until we solve inferred 11430 // error sets at the same time. 11431 const names = error_set.names_ptr[0..error_set.names_len]; 11432 for (names) |name| { 11433 if (mem.eql(u8, field_name, name)) { 11434 break :blk name; 11435 } 11436 } 11437 return sema.fail(block, src, "no error named '{s}' in '{}'", .{ 11438 field_name, child_type, 11439 }); 11440 } else (try sema.mod.getErrorValue(field_name)).key; 11441 11442 return sema.addConstant( 11443 try child_type.copy(arena), 11444 try Value.Tag.@"error".create(arena, .{ .name = name }), 11445 ); 11446 }, 11447 .Union => { 11448 if (child_type.getNamespace()) |namespace| { 11449 if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { 11450 return inst; 11451 } 11452 } 11453 if (child_type.unionTagType()) |enum_ty| { 11454 if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| { 11455 const field_index = @intCast(u32, field_index_usize); 11456 return sema.addConstant( 11457 enum_ty, 11458 try Value.Tag.enum_field_index.create(sema.arena, field_index), 11459 ); 11460 } 11461 } 11462 return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); 11463 }, 11464 .Enum => { 11465 if (child_type.getNamespace()) |namespace| { 11466 if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { 11467 return inst; 11468 } 11469 } 11470 const field_index_usize = child_type.enumFieldIndex(field_name) orelse 11471 return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); 11472 const field_index = @intCast(u32, field_index_usize); 11473 const enum_val = try Value.Tag.enum_field_index.create(arena, field_index); 11474 return sema.addConstant(try child_type.copy(arena), enum_val); 11475 }, 11476 .Struct, .Opaque => { 11477 if (child_type.getNamespace()) |namespace| { 11478 if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { 11479 return inst; 11480 } 11481 } 11482 // TODO add note: declared here 11483 const kw_name = switch (child_type.zigTypeTag()) { 11484 .Struct => "struct", 11485 .Opaque => "opaque", 11486 .Union => "union", 11487 else => unreachable, 11488 }; 11489 return sema.fail(block, src, "{s} '{}' has no member named '{s}'", .{ 11490 kw_name, child_type, field_name, 11491 }); 11492 }, 11493 else => return sema.fail(block, src, "type '{}' has no members", .{child_type}), 11494 } 11495 }, 11496 .Struct => if (is_pointer_to) { 11497 // Avoid loading the entire struct by fetching a pointer and loading that 11498 const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty); 11499 return sema.analyzeLoad(block, src, field_ptr, object_src); 11500 } else { 11501 return sema.structFieldVal(block, src, object, field_name, field_name_src, inner_ty); 11502 }, 11503 .Union => if (is_pointer_to) { 11504 // Avoid loading the entire union by fetching a pointer and loading that 11505 const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty); 11506 return sema.analyzeLoad(block, src, field_ptr, object_src); 11507 } else { 11508 return sema.unionFieldVal(block, src, object, field_name, field_name_src, inner_ty); 11509 }, 11510 else => {}, 11511 } 11512 return sema.fail(block, src, "type '{}' does not support field access", .{object_ty}); 11513} 11514 11515fn fieldPtr( 11516 sema: *Sema, 11517 block: *Block, 11518 src: LazySrcLoc, 11519 object_ptr: Air.Inst.Ref, 11520 field_name: []const u8, 11521 field_name_src: LazySrcLoc, 11522) 
CompileError!Air.Inst.Ref { 11523 // When editing this function, note that there is corresponding logic to be edited 11524 // in `fieldVal`. This function takes a pointer and returns a pointer. 11525 11526 const object_ptr_src = src; // TODO better source location 11527 const object_ptr_ty = sema.typeOf(object_ptr); 11528 const object_ty = switch (object_ptr_ty.zigTypeTag()) { 11529 .Pointer => object_ptr_ty.elemType(), 11530 else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty}), 11531 }; 11532 11533 // Zig allows dereferencing a single pointer during field lookup. Note that 11534 // we don't actually need to generate the dereference some field lookups, like the 11535 // length of arrays and other comptime operations. 11536 const is_pointer_to = object_ty.isSinglePointer(); 11537 11538 const inner_ty = if (is_pointer_to) 11539 object_ty.childType() 11540 else 11541 object_ty; 11542 11543 switch (inner_ty.zigTypeTag()) { 11544 .Array => { 11545 if (mem.eql(u8, field_name, "len")) { 11546 var anon_decl = try block.startAnonDecl(); 11547 defer anon_decl.deinit(); 11548 return sema.analyzeDeclRef(try anon_decl.finish( 11549 Type.initTag(.comptime_int), 11550 try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()), 11551 )); 11552 } else { 11553 return sema.fail( 11554 block, 11555 field_name_src, 11556 "no member named '{s}' in '{}'", 11557 .{ field_name, object_ty }, 11558 ); 11559 } 11560 }, 11561 .Pointer => if (inner_ty.isSlice()) { 11562 const inner_ptr = if (is_pointer_to) 11563 try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) 11564 else 11565 object_ptr; 11566 11567 if (mem.eql(u8, field_name, "ptr")) { 11568 const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); 11569 const slice_ptr_ty = inner_ty.slicePtrFieldType(buf); 11570 11571 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { 11572 var anon_decl = try block.startAnonDecl(); 11573 defer anon_decl.deinit(); 11574 11575 return sema.analyzeDeclRef(try anon_decl.finish( 11576 try slice_ptr_ty.copy(anon_decl.arena()), 11577 try val.slicePtr().copy(anon_decl.arena()), 11578 )); 11579 } 11580 try sema.requireRuntimeBlock(block, src); 11581 11582 const result_ty = try Type.ptr(sema.arena, .{ 11583 .pointee_type = slice_ptr_ty, 11584 .mutable = object_ptr_ty.ptrIsMutable(), 11585 .@"addrspace" = object_ptr_ty.ptrAddressSpace(), 11586 }); 11587 11588 return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr); 11589 } else if (mem.eql(u8, field_name, "len")) { 11590 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { 11591 var anon_decl = try block.startAnonDecl(); 11592 defer anon_decl.deinit(); 11593 11594 return sema.analyzeDeclRef(try anon_decl.finish( 11595 Type.usize, 11596 try Value.Tag.int_u64.create(anon_decl.arena(), val.sliceLen()), 11597 )); 11598 } 11599 try sema.requireRuntimeBlock(block, src); 11600 11601 const result_ty = try Type.ptr(sema.arena, .{ 11602 .pointee_type = Type.usize, 11603 .mutable = object_ptr_ty.ptrIsMutable(), 11604 .@"addrspace" = object_ptr_ty.ptrAddressSpace(), 11605 }); 11606 11607 return block.addTyOp(.ptr_slice_len_ptr, result_ty, inner_ptr); 11608 } else { 11609 return sema.fail( 11610 block, 11611 field_name_src, 11612 "no member named '{s}' in '{}'", 11613 .{ field_name, object_ty }, 11614 ); 11615 } 11616 }, 11617 .Type => { 11618 _ = try sema.resolveConstValue(block, object_ptr_src, object_ptr); 11619 const result = try sema.analyzeLoad(block, src, object_ptr, 
object_ptr_src); 11620 const inner = if (is_pointer_to) 11621 try sema.analyzeLoad(block, src, result, object_ptr_src) 11622 else 11623 result; 11624 11625 const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?; 11626 var to_type_buffer: Value.ToTypeBuffer = undefined; 11627 const child_type = val.toType(&to_type_buffer); 11628 11629 switch (child_type.zigTypeTag()) { 11630 .ErrorSet => { 11631 // TODO resolve inferred error sets 11632 const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { 11633 const error_set = payload.data; 11634 // TODO this is O(N). I'm putting off solving this until we solve inferred 11635 // error sets at the same time. 11636 const names = error_set.names_ptr[0..error_set.names_len]; 11637 for (names) |name| { 11638 if (mem.eql(u8, field_name, name)) { 11639 break :blk name; 11640 } 11641 } 11642 return sema.fail(block, src, "no error named '{s}' in '{}'", .{ 11643 field_name, child_type, 11644 }); 11645 } else (try sema.mod.getErrorValue(field_name)).key; 11646 11647 var anon_decl = try block.startAnonDecl(); 11648 defer anon_decl.deinit(); 11649 return sema.analyzeDeclRef(try anon_decl.finish( 11650 try child_type.copy(anon_decl.arena()), 11651 try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }), 11652 )); 11653 }, 11654 .Union => { 11655 if (child_type.getNamespace()) |namespace| { 11656 if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { 11657 return inst; 11658 } 11659 } 11660 if (child_type.unionTagType()) |enum_ty| { 11661 if (enum_ty.enumFieldIndex(field_name)) |field_index| { 11662 const field_index_u32 = @intCast(u32, field_index); 11663 var anon_decl = try block.startAnonDecl(); 11664 defer anon_decl.deinit(); 11665 return sema.analyzeDeclRef(try anon_decl.finish( 11666 try enum_ty.copy(anon_decl.arena()), 11667 try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), 11668 )); 11669 } 11670 } 11671 return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); 11672 }, 11673 .Enum => { 11674 if (child_type.getNamespace()) |namespace| { 11675 if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { 11676 return inst; 11677 } 11678 } 11679 const field_index = child_type.enumFieldIndex(field_name) orelse { 11680 return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); 11681 }; 11682 const field_index_u32 = @intCast(u32, field_index); 11683 var anon_decl = try block.startAnonDecl(); 11684 defer anon_decl.deinit(); 11685 return sema.analyzeDeclRef(try anon_decl.finish( 11686 try child_type.copy(anon_decl.arena()), 11687 try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), 11688 )); 11689 }, 11690 .Struct, .Opaque => { 11691 if (child_type.getNamespace()) |namespace| { 11692 if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { 11693 return inst; 11694 } 11695 } 11696 return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); 11697 }, 11698 else => return sema.fail(block, src, "type '{}' has no members", .{child_type}), 11699 } 11700 }, 11701 .Struct => { 11702 const inner_ptr = if (is_pointer_to) 11703 try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) 11704 else 11705 object_ptr; 11706 return sema.structFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty); 11707 }, 11708 .Union => { 11709 const inner_ptr = if (is_pointer_to) 11710 try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) 
11711 else 11712 object_ptr; 11713 return sema.unionFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty); 11714 }, 11715 else => {}, 11716 } 11717 return sema.fail(block, src, "type '{}' does not support field access (fieldPtr, {}.{s})", .{ object_ty, object_ptr_ty, field_name }); 11718} 11719 11720fn fieldCallBind( 11721 sema: *Sema, 11722 block: *Block, 11723 src: LazySrcLoc, 11724 raw_ptr: Air.Inst.Ref, 11725 field_name: []const u8, 11726 field_name_src: LazySrcLoc, 11727) CompileError!Air.Inst.Ref { 11728 // When editing this function, note that there is corresponding logic to be edited 11729 // in `fieldVal`. This function takes a pointer and returns a pointer. 11730 11731 const raw_ptr_src = src; // TODO better source location 11732 const raw_ptr_ty = sema.typeOf(raw_ptr); 11733 const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and raw_ptr_ty.ptrSize() == .One) 11734 raw_ptr_ty.childType() 11735 else 11736 return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty}); 11737 11738 // Optionally dereference a second pointer to get the concrete type. 11739 const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One; 11740 const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty; 11741 const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty; 11742 const object_ptr = if (is_double_ptr) 11743 try sema.analyzeLoad(block, src, raw_ptr, src) 11744 else 11745 raw_ptr; 11746 11747 const arena = sema.arena; 11748 find_field: { 11749 switch (concrete_ty.zigTypeTag()) { 11750 .Struct => { 11751 const struct_ty = try sema.resolveTypeFields(block, src, concrete_ty); 11752 const struct_obj = struct_ty.castTag(.@"struct").?.data; 11753 11754 const field_index_usize = struct_obj.fields.getIndex(field_name) orelse 11755 break :find_field; 11756 const field_index = @intCast(u32, field_index_usize); 11757 const field = struct_obj.fields.values()[field_index]; 11758 11759 const ptr_field_ty = try Type.ptr(arena, .{ 11760 .pointee_type = field.ty, 11761 .mutable = ptr_ty.ptrIsMutable(), 11762 .@"addrspace" = ptr_ty.ptrAddressSpace(), 11763 }); 11764 11765 if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { 11766 const pointer = try sema.addConstant( 11767 ptr_field_ty, 11768 try Value.Tag.field_ptr.create(arena, .{ 11769 .container_ptr = struct_ptr_val, 11770 .field_index = field_index, 11771 }), 11772 ); 11773 return sema.analyzeLoad(block, src, pointer, src); 11774 } 11775 11776 try sema.requireRuntimeBlock(block, src); 11777 const ptr_inst = try block.addStructFieldPtr(object_ptr, field_index, ptr_field_ty); 11778 return sema.analyzeLoad(block, src, ptr_inst, src); 11779 }, 11780 .Union => return sema.fail(block, src, "TODO implement field calls on unions", .{}), 11781 .Type => { 11782 const namespace = try sema.analyzeLoad(block, src, object_ptr, src); 11783 return sema.fieldVal(block, src, namespace, field_name, field_name_src); 11784 }, 11785 else => {}, 11786 } 11787 } 11788 11789 // If we get here, we need to look for a decl in the struct type instead. 
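    // Illustrative example (not from the original source) of code that reaches
    // this path: a method call whose callee is a declaration in the container's
    // namespace.
    //
    //     const S = struct {
    //         x: u32,
    //         fn get(self: S) u32 { return self.x; }
    //     };
    //     var s = S{ .x = 1 };
    //     _ = s.get(); // `get` is found by the namespace lookup below and,
    //                  // because its first parameter is `S`, bound to the
    //                  // dereferenced `s` as the implicit first argument.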
11790 switch (concrete_ty.zigTypeTag()) { 11791 .Struct, .Opaque, .Union, .Enum => { 11792 if (concrete_ty.getNamespace()) |namespace| { 11793 if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { 11794 const decl_val = try sema.analyzeLoad(block, src, inst, src); 11795 const decl_type = sema.typeOf(decl_val); 11796 if (decl_type.zigTypeTag() == .Fn and 11797 decl_type.fnParamLen() >= 1) 11798 { 11799 const first_param_type = decl_type.fnParamType(0); 11800 const first_param_tag = first_param_type.tag(); 11801 // zig fmt: off 11802 if (first_param_tag == .var_args_param or 11803 first_param_tag == .generic_poison or ( 11804 first_param_type.zigTypeTag() == .Pointer and 11805 first_param_type.ptrSize() == .One and 11806 first_param_type.childType().eql(concrete_ty))) 11807 { 11808 // zig fmt: on 11809 // TODO: bound fn calls on rvalues should probably 11810 // generate a by-value argument somehow. 11811 const ty = Type.Tag.bound_fn.init(); 11812 const value = try Value.Tag.bound_fn.create(arena, .{ 11813 .func_inst = decl_val, 11814 .arg0_inst = object_ptr, 11815 }); 11816 return sema.addConstant(ty, value); 11817 } else if (first_param_type.eql(concrete_ty)) { 11818 var deref = try sema.analyzeLoad(block, src, object_ptr, src); 11819 const ty = Type.Tag.bound_fn.init(); 11820 const value = try Value.Tag.bound_fn.create(arena, .{ 11821 .func_inst = decl_val, 11822 .arg0_inst = deref, 11823 }); 11824 return sema.addConstant(ty, value); 11825 } 11826 } 11827 } 11828 } 11829 }, 11830 else => {}, 11831 } 11832 11833 return sema.fail(block, src, "type '{}' has no field or member function named '{s}'", .{ concrete_ty, field_name }); 11834} 11835 11836fn namespaceLookup( 11837 sema: *Sema, 11838 block: *Block, 11839 src: LazySrcLoc, 11840 namespace: *Namespace, 11841 decl_name: []const u8, 11842) CompileError!?*Decl { 11843 const gpa = sema.gpa; 11844 if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| { 11845 if (!decl.is_pub and decl.getFileScope() != block.getFileScope()) { 11846 const msg = msg: { 11847 const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ 11848 decl_name, 11849 }); 11850 errdefer msg.destroy(gpa); 11851 try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{}); 11852 break :msg msg; 11853 }; 11854 return sema.failWithOwnedErrorMsg(msg); 11855 } 11856 return decl; 11857 } 11858 return null; 11859} 11860 11861fn namespaceLookupRef( 11862 sema: *Sema, 11863 block: *Block, 11864 src: LazySrcLoc, 11865 namespace: *Namespace, 11866 decl_name: []const u8, 11867) CompileError!?Air.Inst.Ref { 11868 const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; 11869 return try sema.analyzeDeclRef(decl); 11870} 11871 11872fn namespaceLookupVal( 11873 sema: *Sema, 11874 block: *Block, 11875 src: LazySrcLoc, 11876 namespace: *Namespace, 11877 decl_name: []const u8, 11878) CompileError!?Air.Inst.Ref { 11879 const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; 11880 return try sema.analyzeDeclVal(block, src, decl); 11881} 11882 11883fn structFieldPtr( 11884 sema: *Sema, 11885 block: *Block, 11886 src: LazySrcLoc, 11887 struct_ptr: Air.Inst.Ref, 11888 field_name: []const u8, 11889 field_name_src: LazySrcLoc, 11890 unresolved_struct_ty: Type, 11891) CompileError!Air.Inst.Ref { 11892 const arena = sema.arena; 11893 assert(unresolved_struct_ty.zigTypeTag() == .Struct); 11894 11895 const struct_ptr_ty = sema.typeOf(struct_ptr); 11896 const 
struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty); 11897 const struct_obj = struct_ty.castTag(.@"struct").?.data; 11898 11899 const field_index_big = struct_obj.fields.getIndex(field_name) orelse 11900 return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); 11901 const field_index = @intCast(u32, field_index_big); 11902 const field = struct_obj.fields.values()[field_index]; 11903 const ptr_field_ty = try Type.ptr(arena, .{ 11904 .pointee_type = field.ty, 11905 .mutable = struct_ptr_ty.ptrIsMutable(), 11906 .@"addrspace" = struct_ptr_ty.ptrAddressSpace(), 11907 }); 11908 11909 if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { 11910 return sema.addConstant( 11911 ptr_field_ty, 11912 try Value.Tag.field_ptr.create(arena, .{ 11913 .container_ptr = struct_ptr_val, 11914 .field_index = field_index, 11915 }), 11916 ); 11917 } 11918 11919 try sema.requireRuntimeBlock(block, src); 11920 return block.addStructFieldPtr(struct_ptr, field_index, ptr_field_ty); 11921} 11922 11923fn structFieldVal( 11924 sema: *Sema, 11925 block: *Block, 11926 src: LazySrcLoc, 11927 struct_byval: Air.Inst.Ref, 11928 field_name: []const u8, 11929 field_name_src: LazySrcLoc, 11930 unresolved_struct_ty: Type, 11931) CompileError!Air.Inst.Ref { 11932 assert(unresolved_struct_ty.zigTypeTag() == .Struct); 11933 11934 const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty); 11935 const struct_obj = struct_ty.castTag(.@"struct").?.data; 11936 11937 const field_index_usize = struct_obj.fields.getIndex(field_name) orelse 11938 return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); 11939 const field_index = @intCast(u32, field_index_usize); 11940 const field = struct_obj.fields.values()[field_index]; 11941 11942 if (try sema.resolveMaybeUndefVal(block, src, struct_byval)) |struct_val| { 11943 if (struct_val.isUndef()) return sema.addConstUndef(field.ty); 11944 11945 const field_values = struct_val.castTag(.@"struct").?.data; 11946 return sema.addConstant(field.ty, field_values[field_index]); 11947 } 11948 11949 try sema.requireRuntimeBlock(block, src); 11950 return block.addStructFieldVal(struct_byval, field_index, field.ty); 11951} 11952 11953fn unionFieldPtr( 11954 sema: *Sema, 11955 block: *Block, 11956 src: LazySrcLoc, 11957 union_ptr: Air.Inst.Ref, 11958 field_name: []const u8, 11959 field_name_src: LazySrcLoc, 11960 unresolved_union_ty: Type, 11961) CompileError!Air.Inst.Ref { 11962 const arena = sema.arena; 11963 assert(unresolved_union_ty.zigTypeTag() == .Union); 11964 11965 const union_ptr_ty = sema.typeOf(union_ptr); 11966 const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty); 11967 const union_obj = union_ty.cast(Type.Payload.Union).?.data; 11968 11969 const field_index_big = union_obj.fields.getIndex(field_name) orelse 11970 return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); 11971 const field_index = @intCast(u32, field_index_big); 11972 11973 const field = union_obj.fields.values()[field_index]; 11974 const ptr_field_ty = try Type.ptr(arena, .{ 11975 .pointee_type = field.ty, 11976 .mutable = union_ptr_ty.ptrIsMutable(), 11977 .@"addrspace" = union_ptr_ty.ptrAddressSpace(), 11978 }); 11979 11980 if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| { 11981 // TODO detect inactive union field and emit compile error 11982 return sema.addConstant( 11983 ptr_field_ty, 11984 try 
Value.Tag.field_ptr.create(arena, .{ 11985 .container_ptr = union_ptr_val, 11986 .field_index = field_index, 11987 }), 11988 ); 11989 } 11990 11991 try sema.requireRuntimeBlock(block, src); 11992 return block.addStructFieldPtr(union_ptr, field_index, ptr_field_ty); 11993} 11994 11995fn unionFieldVal( 11996 sema: *Sema, 11997 block: *Block, 11998 src: LazySrcLoc, 11999 union_byval: Air.Inst.Ref, 12000 field_name: []const u8, 12001 field_name_src: LazySrcLoc, 12002 unresolved_union_ty: Type, 12003) CompileError!Air.Inst.Ref { 12004 assert(unresolved_union_ty.zigTypeTag() == .Union); 12005 12006 const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty); 12007 const union_obj = union_ty.cast(Type.Payload.Union).?.data; 12008 12009 const field_index_usize = union_obj.fields.getIndex(field_name) orelse 12010 return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); 12011 const field_index = @intCast(u32, field_index_usize); 12012 const field = union_obj.fields.values()[field_index]; 12013 12014 if (try sema.resolveMaybeUndefVal(block, src, union_byval)) |union_val| { 12015 if (union_val.isUndef()) return sema.addConstUndef(field.ty); 12016 12017 // TODO detect inactive union field and emit compile error 12018 const active_val = union_val.castTag(.@"union").?.data.val; 12019 return sema.addConstant(field.ty, active_val); 12020 } 12021 12022 try sema.requireRuntimeBlock(block, src); 12023 return block.addStructFieldVal(union_byval, field_index, field.ty); 12024} 12025 12026fn elemPtr( 12027 sema: *Sema, 12028 block: *Block, 12029 src: LazySrcLoc, 12030 array_ptr: Air.Inst.Ref, 12031 elem_index: Air.Inst.Ref, 12032 elem_index_src: LazySrcLoc, 12033) CompileError!Air.Inst.Ref { 12034 const array_ptr_src = src; // TODO better source location 12035 const array_ptr_ty = sema.typeOf(array_ptr); 12036 const array_ty = switch (array_ptr_ty.zigTypeTag()) { 12037 .Pointer => array_ptr_ty.elemType(), 12038 else => return sema.fail(block, array_ptr_src, "expected pointer, found '{}'", .{array_ptr_ty}), 12039 }; 12040 if (!array_ty.isIndexable()) { 12041 return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty}); 12042 } 12043 12044 switch (array_ty.zigTypeTag()) { 12045 .Pointer => { 12046 // In all below cases, we have to deref the ptr operand to get the actual array pointer. 
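            // For example, `s[i] = x` where `s` is a local `[]u8` arrives here
            // with a `*[]u8` operand; the load below yields the `[]u8` value
            // whose element pointer is then computed.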
12047 const array = try sema.analyzeLoad(block, array_ptr_src, array_ptr, array_ptr_src); 12048 const result_ty = try array_ty.elemPtrType(sema.arena); 12049 switch (array_ty.ptrSize()) { 12050 .Slice => { 12051 const maybe_slice_val = try sema.resolveDefinedValue(block, array_ptr_src, array); 12052 const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); 12053 const runtime_src = if (maybe_slice_val) |slice_val| rs: { 12054 const index_val = maybe_index_val orelse break :rs elem_index_src; 12055 const index = @intCast(usize, index_val.toUnsignedInt()); 12056 const elem_ptr = try slice_val.elemPtr(sema.arena, index); 12057 return sema.addConstant(result_ty, elem_ptr); 12058 } else array_ptr_src; 12059 12060 try sema.requireRuntimeBlock(block, runtime_src); 12061 return block.addSliceElemPtr(array, elem_index, result_ty); 12062 }, 12063 .Many, .C => { 12064 const maybe_ptr_val = try sema.resolveDefinedValue(block, array_ptr_src, array); 12065 const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); 12066 12067 const runtime_src = rs: { 12068 const ptr_val = maybe_ptr_val orelse break :rs array_ptr_src; 12069 const index_val = maybe_index_val orelse break :rs elem_index_src; 12070 const index = @intCast(usize, index_val.toUnsignedInt()); 12071 const elem_ptr = try ptr_val.elemPtr(sema.arena, index); 12072 return sema.addConstant(result_ty, elem_ptr); 12073 }; 12074 12075 try sema.requireRuntimeBlock(block, runtime_src); 12076 return block.addPtrElemPtr(array, elem_index, result_ty); 12077 }, 12078 .One => { 12079 assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable 12080 return sema.elemPtrArray(block, array_ptr_src, array, elem_index, elem_index_src); 12081 }, 12082 } 12083 }, 12084 .Array => return sema.elemPtrArray(block, array_ptr_src, array_ptr, elem_index, elem_index_src), 12085 .Vector => return sema.fail(block, src, "TODO implement Sema for elemPtr for vector", .{}), 12086 else => unreachable, 12087 } 12088} 12089 12090fn elemVal( 12091 sema: *Sema, 12092 block: *Block, 12093 src: LazySrcLoc, 12094 array: Air.Inst.Ref, 12095 elem_index: Air.Inst.Ref, 12096 elem_index_src: LazySrcLoc, 12097) CompileError!Air.Inst.Ref { 12098 const array_src = src; // TODO better source location 12099 const array_ty = sema.typeOf(array); 12100 12101 if (!array_ty.isIndexable()) { 12102 return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty}); 12103 } 12104 12105 switch (array_ty.zigTypeTag()) { 12106 .Pointer => switch (array_ty.ptrSize()) { 12107 .Slice => { 12108 const maybe_slice_val = try sema.resolveDefinedValue(block, array_src, array); 12109 const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); 12110 const runtime_src = if (maybe_slice_val) |slice_val| rs: { 12111 const index_val = maybe_index_val orelse break :rs elem_index_src; 12112 const index = @intCast(usize, index_val.toUnsignedInt()); 12113 const elem_val = try slice_val.elemValue(sema.arena, index); 12114 return sema.addConstant(array_ty.elemType2(), elem_val); 12115 } else array_src; 12116 12117 try sema.requireRuntimeBlock(block, runtime_src); 12118 return block.addBinOp(.slice_elem_val, array, elem_index); 12119 }, 12120 .Many, .C => { 12121 const maybe_ptr_val = try sema.resolveDefinedValue(block, array_src, array); 12122 const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); 12123 12124 const runtime_src = rs: { 12125 const ptr_val = 
maybe_ptr_val orelse break :rs array_src; 12126 const index_val = maybe_index_val orelse break :rs elem_index_src; 12127 const index = @intCast(usize, index_val.toUnsignedInt()); 12128 const maybe_array_val = try sema.pointerDeref(block, array_src, ptr_val, array_ty); 12129 const array_val = maybe_array_val orelse break :rs array_src; 12130 const elem_val = try array_val.elemValue(sema.arena, index); 12131 return sema.addConstant(array_ty.elemType2(), elem_val); 12132 }; 12133 12134 try sema.requireRuntimeBlock(block, runtime_src); 12135 return block.addBinOp(.ptr_elem_val, array, elem_index); 12136 }, 12137 .One => { 12138 assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable 12139 const elem_ptr = try sema.elemPtr(block, array_src, array, elem_index, elem_index_src); 12140 return sema.analyzeLoad(block, array_src, elem_ptr, elem_index_src); 12141 }, 12142 }, 12143 .Array => { 12144 if (try sema.resolveMaybeUndefVal(block, array_src, array)) |array_val| { 12145 const elem_ty = array_ty.childType(); 12146 if (array_val.isUndef()) return sema.addConstUndef(elem_ty); 12147 const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); 12148 if (maybe_index_val) |index_val| { 12149 const index = @intCast(usize, index_val.toUnsignedInt()); 12150 const elem_val = try array_val.elemValue(sema.arena, index); 12151 return sema.addConstant(elem_ty, elem_val); 12152 } 12153 } 12154 try sema.requireRuntimeBlock(block, array_src); 12155 return block.addBinOp(.array_elem_val, array, elem_index); 12156 }, 12157 .Vector => return sema.fail(block, array_src, "TODO implement Sema for elemVal for vector", .{}), 12158 else => unreachable, 12159 } 12160} 12161 12162fn elemPtrArray( 12163 sema: *Sema, 12164 block: *Block, 12165 src: LazySrcLoc, 12166 array_ptr: Air.Inst.Ref, 12167 elem_index: Air.Inst.Ref, 12168 elem_index_src: LazySrcLoc, 12169) CompileError!Air.Inst.Ref { 12170 const array_ptr_ty = sema.typeOf(array_ptr); 12171 const result_ty = try array_ptr_ty.elemPtrType(sema.arena); 12172 12173 if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| { 12174 if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| { 12175 // Both array pointer and index are compile-time known. 12176 const index_u64 = index_val.toUnsignedInt(); 12177 // @intCast here because it would have been impossible to construct a value that 12178 // required a larger index. 12179 const elem_ptr = try array_ptr_val.elemPtr(sema.arena, @intCast(usize, index_u64)); 12180 return sema.addConstant(result_ty, elem_ptr); 12181 } 12182 } 12183 // TODO safety check for array bounds 12184 try sema.requireRuntimeBlock(block, src); 12185 return block.addPtrElemPtr(array_ptr, elem_index, result_ty); 12186} 12187 12188fn coerce( 12189 sema: *Sema, 12190 block: *Block, 12191 dest_ty_unresolved: Type, 12192 inst: Air.Inst.Ref, 12193 inst_src: LazySrcLoc, 12194) CompileError!Air.Inst.Ref { 12195 switch (dest_ty_unresolved.tag()) { 12196 .var_args_param => return sema.coerceVarArgParam(block, inst, inst_src), 12197 .generic_poison => return inst, 12198 else => {}, 12199 } 12200 const dest_ty_src = inst_src; // TODO better source location 12201 const dest_ty = try sema.resolveTypeFields(block, dest_ty_src, dest_ty_unresolved); 12202 const inst_ty = try sema.resolveTypeFields(block, inst_src, sema.typeOf(inst)); 12203 // If the types are the same, we can return the operand. 
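    // (`Type.eql` is a structural comparison, so two independently constructed
    // descriptions of the same type also take this fast path.)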
12204 if (dest_ty.eql(inst_ty)) 12205 return inst; 12206 12207 const arena = sema.arena; 12208 const target = sema.mod.getTarget(); 12209 12210 const in_memory_result = coerceInMemoryAllowed(dest_ty, inst_ty, false, target); 12211 if (in_memory_result == .ok) { 12212 if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { 12213 // Keep the comptime Value representation; take the new type. 12214 return sema.addConstant(dest_ty, val); 12215 } 12216 try sema.requireRuntimeBlock(block, inst_src); 12217 return block.addBitCast(dest_ty, inst); 12218 } 12219 12220 // undefined to anything 12221 if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { 12222 if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) { 12223 return sema.addConstant(dest_ty, val); 12224 } 12225 } 12226 assert(inst_ty.zigTypeTag() != .Undefined); 12227 12228 // comptime known number to other number 12229 // TODO why is this a separate function? should just be flattened into the 12230 // switch expression below. 12231 if (try sema.coerceNum(block, dest_ty, inst, inst_src)) |some| 12232 return some; 12233 12234 switch (dest_ty.zigTypeTag()) { 12235 .Optional => { 12236 // null to ?T 12237 if (inst_ty.zigTypeTag() == .Null) { 12238 return sema.addConstant(dest_ty, Value.@"null"); 12239 } 12240 12241 // T to ?T 12242 const child_type = try dest_ty.optionalChildAlloc(sema.arena); 12243 const intermediate = try sema.coerce(block, child_type, inst, inst_src); 12244 return sema.wrapOptional(block, dest_ty, intermediate, inst_src); 12245 }, 12246 .Pointer => { 12247 const dest_info = dest_ty.ptrInfo().data; 12248 12249 // Function body to function pointer. 12250 if (inst_ty.zigTypeTag() == .Fn) { 12251 const fn_val = try sema.resolveConstValue(block, inst_src, inst); 12252 const fn_decl = fn_val.castTag(.function).?.data.owner_decl; 12253 const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); 12254 return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); 12255 } 12256 12257 // *T to *[1]T 12258 single_item: { 12259 if (dest_info.size != .One) break :single_item; 12260 if (!inst_ty.isSinglePointer()) break :single_item; 12261 const ptr_elem_ty = inst_ty.childType(); 12262 const array_ty = dest_info.pointee_type; 12263 if (array_ty.zigTypeTag() != .Array) break :single_item; 12264 const array_elem_ty = array_ty.childType(); 12265 const dest_is_mut = dest_info.mutable; 12266 if (inst_ty.isConstPtr() and dest_is_mut) break :single_item; 12267 if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :single_item; 12268 if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :single_item; 12269 switch (coerceInMemoryAllowed(array_elem_ty, ptr_elem_ty, dest_is_mut, target)) { 12270 .ok => {}, 12271 .no_match => break :single_item, 12272 } 12273 return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); 12274 } 12275 12276 // Coercions where the source is a single pointer to an array. 
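            // Examples accepted by the block below, depending on `dest_info.size`:
            //   *[N]T   -> []T     (Slice)
            //   *[N]T   -> [*c]T   (C)
            //   *[N:s]T -> [*:s]T  (Many; sentinels must match, or the
            //                       destination may simply have no sentinel)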
12277 src_array_ptr: { 12278 if (!inst_ty.isSinglePointer()) break :src_array_ptr; 12279 const array_ty = inst_ty.childType(); 12280 if (array_ty.zigTypeTag() != .Array) break :src_array_ptr; 12281 const array_elem_type = array_ty.childType(); 12282 const dest_is_mut = dest_info.mutable; 12283 if (inst_ty.isConstPtr() and dest_is_mut) break :src_array_ptr; 12284 if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :src_array_ptr; 12285 if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :src_array_ptr; 12286 12287 const dst_elem_type = dest_info.pointee_type; 12288 switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, target)) { 12289 .ok => {}, 12290 .no_match => break :src_array_ptr, 12291 } 12292 12293 switch (dest_info.size) { 12294 .Slice => { 12295 // *[N]T to []T 12296 return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src); 12297 }, 12298 .C => { 12299 // *[N]T to [*c]T 12300 return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); 12301 }, 12302 .Many => { 12303 // *[N]T to [*]T 12304 // *[N:s]T to [*:s]T 12305 // *[N:s]T to [*]T 12306 if (dest_info.sentinel) |dst_sentinel| { 12307 if (array_ty.sentinel()) |src_sentinel| { 12308 if (src_sentinel.eql(dst_sentinel, dst_elem_type)) { 12309 return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); 12310 } 12311 } 12312 } else { 12313 return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); 12314 } 12315 }, 12316 .One => {}, 12317 } 12318 } 12319 12320 // coercion from C pointer 12321 if (inst_ty.isCPtr()) src_c_ptr: { 12322 // In this case we must add a safety check because the C pointer 12323 // could be null. 12324 const src_elem_ty = inst_ty.childType(); 12325 const dest_is_mut = dest_info.mutable; 12326 const dst_elem_type = dest_info.pointee_type; 12327 switch (coerceInMemoryAllowed(dst_elem_type, src_elem_ty, dest_is_mut, target)) { 12328 .ok => {}, 12329 .no_match => break :src_c_ptr, 12330 } 12331 // TODO add safety check for null pointer 12332 return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); 12333 } 12334 12335 // coercion to C pointer 12336 if (dest_info.size == .C) { 12337 switch (inst_ty.zigTypeTag()) { 12338 .Null => { 12339 return sema.addConstant(dest_ty, Value.@"null"); 12340 }, 12341 .ComptimeInt => { 12342 const addr = try sema.coerce(block, Type.usize, inst, inst_src); 12343 return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); 12344 }, 12345 .Int => { 12346 const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) { 12347 .signed => Type.isize, 12348 .unsigned => Type.usize, 12349 }; 12350 const addr = try sema.coerce(block, ptr_size_ty, inst, inst_src); 12351 return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); 12352 }, 12353 else => {}, 12354 } 12355 } 12356 12357 // cast from *T and [*]T to *anyopaque 12358 // but don't do it if the source type is a double pointer 12359 if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag() == .Pointer and 12360 inst_ty.childType().zigTypeTag() != .Pointer) 12361 { 12362 return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); 12363 } 12364 }, 12365 .Int => { 12366 // integer widening 12367 if (inst_ty.zigTypeTag() == .Int) { 12368 assert(!(try sema.isComptimeKnown(block, inst_src, inst))); // handled above 12369 12370 const dst_info = dest_ty.intInfo(target); 12371 const src_info = inst_ty.intInfo(target); 12372 if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or 12373 // small enough unsigned 
ints can get casted to large enough signed ints 12374 (dst_info.signedness == .signed and dst_info.bits > src_info.bits)) 12375 { 12376 try sema.requireRuntimeBlock(block, inst_src); 12377 return block.addTyOp(.intcast, dest_ty, inst); 12378 } 12379 } 12380 }, 12381 .Float => { 12382 // float widening 12383 if (inst_ty.zigTypeTag() == .Float) { 12384 assert(!(try sema.isComptimeKnown(block, inst_src, inst))); // handled above 12385 12386 const src_bits = inst_ty.floatBits(target); 12387 const dst_bits = dest_ty.floatBits(target); 12388 if (dst_bits >= src_bits) { 12389 try sema.requireRuntimeBlock(block, inst_src); 12390 return block.addTyOp(.fpext, dest_ty, inst); 12391 } 12392 } 12393 }, 12394 .Enum => switch (inst_ty.zigTypeTag()) { 12395 .EnumLiteral => { 12396 // enum literal to enum 12397 const val = try sema.resolveConstValue(block, inst_src, inst); 12398 const bytes = val.castTag(.enum_literal).?.data; 12399 const field_index = dest_ty.enumFieldIndex(bytes) orelse { 12400 const msg = msg: { 12401 const msg = try sema.errMsg( 12402 block, 12403 inst_src, 12404 "enum '{}' has no field named '{s}'", 12405 .{ dest_ty, bytes }, 12406 ); 12407 errdefer msg.destroy(sema.gpa); 12408 try sema.mod.errNoteNonLazy( 12409 dest_ty.declSrcLoc(), 12410 msg, 12411 "enum declared here", 12412 .{}, 12413 ); 12414 break :msg msg; 12415 }; 12416 return sema.failWithOwnedErrorMsg(msg); 12417 }; 12418 return sema.addConstant( 12419 dest_ty, 12420 try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)), 12421 ); 12422 }, 12423 .Union => blk: { 12424 // union to its own tag type 12425 const union_tag_ty = inst_ty.unionTagType() orelse break :blk; 12426 if (union_tag_ty.eql(dest_ty)) { 12427 return sema.unionToTag(block, inst_ty, inst, inst_src); 12428 } 12429 }, 12430 else => {}, 12431 }, 12432 .ErrorUnion => { 12433 // T to E!T or E to E!T 12434 return sema.wrapErrorUnion(block, dest_ty, inst, inst_src); 12435 }, 12436 .Union => switch (inst_ty.zigTypeTag()) { 12437 .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), 12438 else => {}, 12439 }, 12440 .Array => switch (inst_ty.zigTypeTag()) { 12441 .Vector => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src), 12442 else => {}, 12443 }, 12444 .Vector => switch (inst_ty.zigTypeTag()) { 12445 .Array => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src), 12446 else => {}, 12447 }, 12448 else => {}, 12449 } 12450 12451 return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty }); 12452} 12453 12454const InMemoryCoercionResult = enum { 12455 ok, 12456 no_match, 12457}; 12458 12459/// If pointers have the same representation in runtime memory, a bitcast AIR instruction 12460/// may be used for the coercion. 12461/// * `const` attribute can be gained 12462/// * `volatile` attribute can be gained 12463/// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut 12464/// * alignment can be decreased 12465/// * bit offset attributes must match exactly 12466/// * `*`/`[*]` must match exactly, but `[*c]` matches either one 12467/// * sentinel-terminated pointers can coerce into `[*]` 12468/// TODO improve this function to report recursive compile errors like it does in stage1. 
12469/// look at the function types_match_const_cast_only 12470fn coerceInMemoryAllowed(dest_ty: Type, src_ty: Type, dest_is_mut: bool, target: std.Target) InMemoryCoercionResult { 12471 if (dest_ty.eql(src_ty)) 12472 return .ok; 12473 12474 // Pointers / Pointer-like Optionals 12475 var dest_buf: Type.Payload.ElemType = undefined; 12476 var src_buf: Type.Payload.ElemType = undefined; 12477 if (dest_ty.ptrOrOptionalPtrTy(&dest_buf)) |dest_ptr_ty| { 12478 if (src_ty.ptrOrOptionalPtrTy(&src_buf)) |src_ptr_ty| { 12479 return coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target); 12480 } 12481 } 12482 12483 // Slices 12484 if (dest_ty.isSlice() and src_ty.isSlice()) { 12485 return coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target); 12486 } 12487 12488 // Functions 12489 if (dest_ty.zigTypeTag() == .Fn and src_ty.zigTypeTag() == .Fn) { 12490 return coerceInMemoryAllowedFns(dest_ty, src_ty, target); 12491 } 12492 12493 // Error Unions 12494 if (dest_ty.zigTypeTag() == .ErrorUnion and src_ty.zigTypeTag() == .ErrorUnion) { 12495 const child = coerceInMemoryAllowed(dest_ty.errorUnionPayload(), src_ty.errorUnionPayload(), dest_is_mut, target); 12496 if (child == .no_match) { 12497 return child; 12498 } 12499 return coerceInMemoryAllowed(dest_ty.errorUnionSet(), src_ty.errorUnionSet(), dest_is_mut, target); 12500 } 12501 12502 // Error Sets 12503 if (dest_ty.zigTypeTag() == .ErrorSet and src_ty.zigTypeTag() == .ErrorSet) { 12504 return coerceInMemoryAllowedErrorSets(dest_ty, src_ty); 12505 } 12506 12507 // TODO: arrays 12508 // TODO: non-pointer-like optionals 12509 // TODO: vectors 12510 12511 return .no_match; 12512} 12513 12514fn coerceInMemoryAllowedErrorSets( 12515 dest_ty: Type, 12516 src_ty: Type, 12517) InMemoryCoercionResult { 12518 // Coercion to `anyerror`. Note that this check can return false positives 12519 // in case the error sets did not get resolved. 12520 if (dest_ty.isAnyError()) { 12521 return .ok; 12522 } 12523 // If both are inferred error sets of functions, and 12524 // the dest includes the source function, the coercion is OK. 12525 // This check is important because it works without forcing a full resolution 12526 // of inferred error sets. 12527 if (src_ty.castTag(.error_set_inferred)) |src_payload| { 12528 if (dest_ty.castTag(.error_set_inferred)) |dst_payload| { 12529 const src_func = src_payload.data.func; 12530 const dst_func = dst_payload.data.func; 12531 12532 if (src_func == dst_func or dst_payload.data.functions.contains(src_func)) { 12533 return .ok; 12534 } 12535 } 12536 } 12537 12538 // TODO full error set resolution and compare sets by names. 
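// For example (illustrative): the language permits coercing `error{A}` to
// `error{A,B}`, but without resolving and comparing the sets by name this
// function conservatively reports .no_match for such pairs.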
12539 return .no_match; 12540} 12541 12542fn coerceInMemoryAllowedFns( 12543 dest_ty: Type, 12544 src_ty: Type, 12545 target: std.Target, 12546) InMemoryCoercionResult { 12547 const dest_info = dest_ty.fnInfo(); 12548 const src_info = src_ty.fnInfo(); 12549 12550 if (dest_info.is_var_args != src_info.is_var_args) { 12551 return .no_match; 12552 } 12553 12554 if (dest_info.is_generic != src_info.is_generic) { 12555 return .no_match; 12556 } 12557 12558 if (!src_info.return_type.isNoReturn()) { 12559 const rt = coerceInMemoryAllowed(dest_info.return_type, src_info.return_type, false, target); 12560 if (rt == .no_match) { 12561 return rt; 12562 } 12563 } 12564 12565 if (dest_info.param_types.len != src_info.param_types.len) { 12566 return .no_match; 12567 } 12568 12569 for (dest_info.param_types) |dest_param_ty, i| { 12570 const src_param_ty = src_info.param_types[i]; 12571 12572 if (dest_info.comptime_params[i] != src_info.comptime_params[i]) { 12573 return .no_match; 12574 } 12575 12576 // TODO: noalias 12577 12578 // Note: Cast direction is reversed here. 12579 const param = coerceInMemoryAllowed(src_param_ty, dest_param_ty, false, target); 12580 if (param == .no_match) { 12581 return param; 12582 } 12583 } 12584 12585 if (dest_info.cc != src_info.cc) { 12586 return .no_match; 12587 } 12588 12589 return .ok; 12590} 12591 12592fn coerceInMemoryAllowedPtrs( 12593 dest_ty: Type, 12594 src_ty: Type, 12595 dest_ptr_ty: Type, 12596 src_ptr_ty: Type, 12597 dest_is_mut: bool, 12598 target: std.Target, 12599) InMemoryCoercionResult { 12600 const dest_info = dest_ptr_ty.ptrInfo().data; 12601 const src_info = src_ptr_ty.ptrInfo().data; 12602 12603 const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target); 12604 if (child == .no_match) { 12605 return child; 12606 } 12607 12608 if (dest_info.@"addrspace" != src_info.@"addrspace") { 12609 return .no_match; 12610 } 12611 12612 const ok_sent = dest_info.sentinel == null or src_info.size == .C or 12613 (src_info.sentinel != null and 12614 dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type)); 12615 if (!ok_sent) { 12616 return .no_match; 12617 } 12618 12619 const ok_ptr_size = src_info.size == dest_info.size or 12620 src_info.size == .C or dest_info.size == .C; 12621 if (!ok_ptr_size) { 12622 return .no_match; 12623 } 12624 12625 const ok_cv_qualifiers = 12626 (src_info.mutable or !dest_info.mutable) and 12627 (!src_info.@"volatile" or dest_info.@"volatile"); 12628 12629 if (!ok_cv_qualifiers) { 12630 return .no_match; 12631 } 12632 12633 const dest_allow_zero = dest_ty.ptrAllowsZero(); 12634 const src_allow_zero = src_ty.ptrAllowsZero(); 12635 12636 const ok_allows_zero = (dest_allow_zero and 12637 (src_allow_zero or !dest_is_mut)) or 12638 (!dest_allow_zero and !src_allow_zero); 12639 if (!ok_allows_zero) { 12640 return .no_match; 12641 } 12642 12643 if (src_info.host_size != dest_info.host_size or 12644 src_info.bit_offset != dest_info.bit_offset) 12645 { 12646 return .no_match; 12647 } 12648 12649 // If both pointers have alignment 0, it means they both want ABI alignment. 12650 // In this case, if they share the same child type, no need to resolve 12651 // pointee type alignment. Otherwise both pointee types must have their alignment 12652 // resolved and we compare the alignment numerically.
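// For example (illustrative): coercing `*align(8) u32` to `*align(4) u32` only
// decreases alignment and is accepted, while the reverse direction is rejected
// by the check below.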
12653 if (src_info.@"align" != 0 or dest_info.@"align" != 0 or 12654 !dest_info.pointee_type.eql(src_info.pointee_type)) 12655 { 12656 const src_align = src_info.@"align"; 12657 const dest_align = dest_info.@"align"; 12658 12659 if (dest_align > src_align) { 12660 return .no_match; 12661 } 12662 } 12663 12664 return .ok; 12665} 12666 12667fn coerceNum( 12668 sema: *Sema, 12669 block: *Block, 12670 dest_ty: Type, 12671 inst: Air.Inst.Ref, 12672 inst_src: LazySrcLoc, 12673) CompileError!?Air.Inst.Ref { 12674 const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse return null; 12675 const inst_ty = sema.typeOf(inst); 12676 const src_zig_tag = inst_ty.zigTypeTag(); 12677 const dst_zig_tag = dest_ty.zigTypeTag(); 12678 12679 const target = sema.mod.getTarget(); 12680 12681 switch (dst_zig_tag) { 12682 .ComptimeInt, .Int => switch (src_zig_tag) { 12683 .Float, .ComptimeFloat => { 12684 if (val.floatHasFraction()) { 12685 return sema.fail(block, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val, dest_ty }); 12686 } 12687 const result_val = val.floatToInt(sema.arena, dest_ty, target) catch |err| switch (err) { 12688 error.FloatCannotFit => { 12689 return sema.fail(block, inst_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty }); 12690 }, 12691 else => |e| return e, 12692 }; 12693 return try sema.addConstant(dest_ty, result_val); 12694 }, 12695 .Int, .ComptimeInt => { 12696 if (!val.intFitsInType(dest_ty, target)) { 12697 return sema.fail(block, inst_src, "type {} cannot represent integer value {}", .{ dest_ty, val }); 12698 } 12699 return try sema.addConstant(dest_ty, val); 12700 }, 12701 else => {}, 12702 }, 12703 .ComptimeFloat, .Float => switch (src_zig_tag) { 12704 .ComptimeFloat => { 12705 const result_val = try val.floatCast(sema.arena, dest_ty); 12706 return try sema.addConstant(dest_ty, result_val); 12707 }, 12708 .Float => { 12709 const result_val = try val.floatCast(sema.arena, dest_ty); 12710 if (!val.eql(result_val, dest_ty)) { 12711 return sema.fail( 12712 block, 12713 inst_src, 12714 "type {} cannot represent float value {}", 12715 .{ dest_ty, val }, 12716 ); 12717 } 12718 return try sema.addConstant(dest_ty, result_val); 12719 }, 12720 .Int, .ComptimeInt => { 12721 const result_val = try val.intToFloat(sema.arena, dest_ty, target); 12722 // TODO implement this compile error 12723 //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty); 12724 //if (!int_again_val.eql(val, inst_ty)) { 12725 // return sema.fail( 12726 // block, 12727 // inst_src, 12728 // "type {} cannot represent integer value {}", 12729 // .{ dest_ty, val }, 12730 // ); 12731 //} 12732 return try sema.addConstant(dest_ty, result_val); 12733 }, 12734 else => {}, 12735 }, 12736 else => {}, 12737 } 12738 return null; 12739} 12740 12741fn coerceVarArgParam( 12742 sema: *Sema, 12743 block: *Block, 12744 inst: Air.Inst.Ref, 12745 inst_src: LazySrcLoc, 12746) !Air.Inst.Ref { 12747 const inst_ty = sema.typeOf(inst); 12748 switch (inst_ty.zigTypeTag()) { 12749 .ComptimeInt, .ComptimeFloat => return sema.fail(block, inst_src, "integer and float literals in var args function must be casted", .{}), 12750 else => {}, 12751 } 12752 // TODO implement more of this function. 12753 return inst; 12754} 12755 12756// TODO migrate callsites to use storePtr2 instead. 
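/// Thin wrapper around `storePtr2` that reuses `src` for the pointer and
/// operand source locations and always emits a plain `.store`.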
12757fn storePtr( 12758 sema: *Sema, 12759 block: *Block, 12760 src: LazySrcLoc, 12761 ptr: Air.Inst.Ref, 12762 uncasted_operand: Air.Inst.Ref, 12763) CompileError!void { 12764 return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, .store); 12765} 12766 12767fn storePtr2( 12768 sema: *Sema, 12769 block: *Block, 12770 src: LazySrcLoc, 12771 ptr: Air.Inst.Ref, 12772 ptr_src: LazySrcLoc, 12773 uncasted_operand: Air.Inst.Ref, 12774 operand_src: LazySrcLoc, 12775 air_tag: Air.Inst.Tag, 12776) !void { 12777 const ptr_ty = sema.typeOf(ptr); 12778 if (ptr_ty.isConstPtr()) 12779 return sema.fail(block, src, "cannot assign to constant", .{}); 12780 12781 const elem_ty = ptr_ty.childType(); 12782 const operand = try sema.coerce(block, elem_ty, uncasted_operand, operand_src); 12783 if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null) 12784 return; 12785 12786 const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { 12787 const maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand); 12788 const operand_val = maybe_operand_val orelse { 12789 try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); 12790 break :rs operand_src; 12791 }; 12792 if (ptr_val.isComptimeMutablePtr()) { 12793 try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty); 12794 return; 12795 } else break :rs ptr_src; 12796 } else ptr_src; 12797 12798 // TODO handle if the element type requires comptime 12799 12800 try sema.requireRuntimeBlock(block, runtime_src); 12801 try sema.resolveTypeLayout(block, src, elem_ty); 12802 _ = try block.addBinOp(air_tag, ptr, operand); 12803} 12804 12805/// Call when you have Value objects rather than Air instructions, and you want to 12806/// assert the store must be done at comptime. 
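/// The store is performed by bit-casting `operand_val` to the pointee type and
/// copying the result into the owning Decl's value arena.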
12807fn storePtrVal( 12808 sema: *Sema, 12809 block: *Block, 12810 src: LazySrcLoc, 12811 ptr_val: Value, 12812 operand_val: Value, 12813 operand_ty: Type, 12814) !void { 12815 var kit = try beginComptimePtrMutation(sema, block, src, ptr_val); 12816 try sema.checkComptimeVarStore(block, src, kit.decl_ref_mut); 12817 12818 const bitcasted_val = try sema.bitCastVal(block, src, operand_val, operand_ty, kit.ty); 12819 12820 const arena = kit.beginArena(sema.gpa); 12821 defer kit.finishArena(); 12822 12823 kit.val.* = try bitcasted_val.copy(arena); 12824} 12825 12826const ComptimePtrMutationKit = struct { 12827 decl_ref_mut: Value.Payload.DeclRefMut.Data, 12828 val: *Value, 12829 ty: Type, 12830 decl_arena: std.heap.ArenaAllocator = undefined, 12831 12832 fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator { 12833 self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa); 12834 return self.decl_arena.allocator(); 12835 } 12836 12837 fn finishArena(self: *ComptimePtrMutationKit) void { 12838 self.decl_ref_mut.decl.value_arena.?.* = self.decl_arena.state; 12839 self.decl_arena = undefined; 12840 } 12841}; 12842 12843fn beginComptimePtrMutation( 12844 sema: *Sema, 12845 block: *Block, 12846 src: LazySrcLoc, 12847 ptr_val: Value, 12848) CompileError!ComptimePtrMutationKit { 12849 switch (ptr_val.tag()) { 12850 .decl_ref_mut => { 12851 const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; 12852 return ComptimePtrMutationKit{ 12853 .decl_ref_mut = decl_ref_mut, 12854 .val = &decl_ref_mut.decl.val, 12855 .ty = decl_ref_mut.decl.ty, 12856 }; 12857 }, 12858 .elem_ptr => { 12859 const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; 12860 var parent = try beginComptimePtrMutation(sema, block, src, elem_ptr.array_ptr); 12861 const elem_ty = parent.ty.childType(); 12862 switch (parent.val.tag()) { 12863 .undef => { 12864 // An array has been initialized to undefined at comptime and now we 12865 // are for the first time setting an element. We must change the representation 12866 // of the array from `undef` to `array`. 12867 const arena = parent.beginArena(sema.gpa); 12868 defer parent.finishArena(); 12869 12870 const array_len_including_sentinel = 12871 try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); 12872 const elems = try arena.alloc(Value, array_len_including_sentinel); 12873 mem.set(Value, elems, Value.undef); 12874 12875 parent.val.* = try Value.Tag.array.create(arena, elems); 12876 12877 return ComptimePtrMutationKit{ 12878 .decl_ref_mut = parent.decl_ref_mut, 12879 .val = &elems[elem_ptr.index], 12880 .ty = elem_ty, 12881 }; 12882 }, 12883 .bytes => { 12884 // An array is memory-optimized to store a slice of bytes, but we are about 12885 // to modify an individual field and the representation has to change. 12886 // If we wanted to avoid this, there would need to be special detection 12887 // elsewhere to identify when writing a value to an array element that is stored 12888 // using the `bytes` tag, and handle it without making a call to this function. 
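// Expand the byte string into one `int_u64` Value per element so that a single
// element can be mutated in place.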
12889 const arena = parent.beginArena(sema.gpa); 12890 defer parent.finishArena(); 12891 12892 const bytes = parent.val.castTag(.bytes).?.data; 12893 assert(bytes.len == parent.ty.arrayLenIncludingSentinel()); 12894 const elems = try arena.alloc(Value, bytes.len); 12895 for (elems) |*elem, i| { 12896 elem.* = try Value.Tag.int_u64.create(arena, bytes[i]); 12897 } 12898 12899 parent.val.* = try Value.Tag.array.create(arena, elems); 12900 12901 return ComptimePtrMutationKit{ 12902 .decl_ref_mut = parent.decl_ref_mut, 12903 .val = &elems[elem_ptr.index], 12904 .ty = elem_ty, 12905 }; 12906 }, 12907 .repeated => { 12908 // An array is memory-optimized to store only a single element value, and 12909 // that value is understood to be the same for the entire length of the array. 12910 // However, now we want to modify an individual field and so the 12911 // representation has to change. If we wanted to avoid this, there would 12912 // need to be special detection elsewhere to identify when writing a value to an 12913 // array element that is stored using the `repeated` tag, and handle it 12914 // without making a call to this function. 12915 const arena = parent.beginArena(sema.gpa); 12916 defer parent.finishArena(); 12917 12918 const repeated_val = try parent.val.castTag(.repeated).?.data.copy(arena); 12919 const array_len_including_sentinel = 12920 try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); 12921 const elems = try arena.alloc(Value, array_len_including_sentinel); 12922 mem.set(Value, elems, repeated_val); 12923 12924 parent.val.* = try Value.Tag.array.create(arena, elems); 12925 12926 return ComptimePtrMutationKit{ 12927 .decl_ref_mut = parent.decl_ref_mut, 12928 .val = &elems[elem_ptr.index], 12929 .ty = elem_ty, 12930 }; 12931 }, 12932 12933 .array => return ComptimePtrMutationKit{ 12934 .decl_ref_mut = parent.decl_ref_mut, 12935 .val = &parent.val.castTag(.array).?.data[elem_ptr.index], 12936 .ty = elem_ty, 12937 }, 12938 12939 else => unreachable, 12940 } 12941 }, 12942 .field_ptr => { 12943 const field_ptr = ptr_val.castTag(.field_ptr).?.data; 12944 var parent = try beginComptimePtrMutation(sema, block, src, field_ptr.container_ptr); 12945 const field_index = @intCast(u32, field_ptr.field_index); 12946 const field_ty = parent.ty.structFieldType(field_index); 12947 switch (parent.val.tag()) { 12948 .undef => { 12949 // A struct or union has been initialized to undefined at comptime and now we 12950 // are for the first time setting a field. We must change the representation 12951 // of the struct/union from `undef` to `struct`/`union`. 
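// For a struct this means allocating one (undefined) Value per field; for a
// union it means allocating a payload whose tag selects the field being set.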
12952 const arena = parent.beginArena(sema.gpa); 12953 defer parent.finishArena(); 12954 12955 switch (parent.ty.zigTypeTag()) { 12956 .Struct => { 12957 const fields = try arena.alloc(Value, parent.ty.structFieldCount()); 12958 mem.set(Value, fields, Value.undef); 12959 12960 parent.val.* = try Value.Tag.@"struct".create(arena, fields); 12961 12962 return ComptimePtrMutationKit{ 12963 .decl_ref_mut = parent.decl_ref_mut, 12964 .val = &fields[field_index], 12965 .ty = field_ty, 12966 }; 12967 }, 12968 .Union => { 12969 const payload = try arena.create(Value.Payload.Union); 12970 payload.* = .{ .data = .{ 12971 .tag = try Value.Tag.enum_field_index.create(arena, field_index), 12972 .val = Value.undef, 12973 } }; 12974 12975 parent.val.* = Value.initPayload(&payload.base); 12976 12977 return ComptimePtrMutationKit{ 12978 .decl_ref_mut = parent.decl_ref_mut, 12979 .val = &payload.data.val, 12980 .ty = field_ty, 12981 }; 12982 }, 12983 else => unreachable, 12984 } 12985 }, 12986 .@"struct" => return ComptimePtrMutationKit{ 12987 .decl_ref_mut = parent.decl_ref_mut, 12988 .val = &parent.val.castTag(.@"struct").?.data[field_index], 12989 .ty = field_ty, 12990 }, 12991 .@"union" => { 12992 // We need to set the active field of the union. 12993 const arena = parent.beginArena(sema.gpa); 12994 defer parent.finishArena(); 12995 12996 const payload = &parent.val.castTag(.@"union").?.data; 12997 payload.tag = try Value.Tag.enum_field_index.create(arena, field_index); 12998 12999 return ComptimePtrMutationKit{ 13000 .decl_ref_mut = parent.decl_ref_mut, 13001 .val = &payload.val, 13002 .ty = field_ty, 13003 }; 13004 }, 13005 13006 else => unreachable, 13007 } 13008 }, 13009 .eu_payload_ptr => return sema.fail(block, src, "TODO comptime store to eu_payload_ptr", .{}), 13010 .opt_payload_ptr => return sema.fail(block, src, "TODO comptime store opt_payload_ptr", .{}), 13011 .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already 13012 else => unreachable, 13013 } 13014} 13015 13016const ComptimePtrLoadKit = struct { 13017 /// The Value of the Decl that owns this memory. 13018 root_val: Value, 13019 /// Parent Value. 13020 val: Value, 13021 /// The Type of the parent Value. 13022 ty: Type, 13023 /// The starting byte offset of `val` from `root_val`. 13024 byte_offset: usize, 13025 /// Whether the `root_val` could be mutated by further 13026 /// semantic analysis and a copy must be performed. 
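/// This is the case when the pointer ultimately originates from a `decl_ref_mut`.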
13027 is_mutable: bool, 13028}; 13029 13030const ComptimePtrLoadError = CompileError || error{ 13031 RuntimeLoad, 13032}; 13033 13034fn beginComptimePtrLoad( 13035 sema: *Sema, 13036 block: *Block, 13037 src: LazySrcLoc, 13038 ptr_val: Value, 13039) ComptimePtrLoadError!ComptimePtrLoadKit { 13040 const target = sema.mod.getTarget(); 13041 switch (ptr_val.tag()) { 13042 .decl_ref => { 13043 const decl = ptr_val.castTag(.decl_ref).?.data; 13044 const decl_val = try decl.value(); 13045 if (decl_val.tag() == .variable) return error.RuntimeLoad; 13046 return ComptimePtrLoadKit{ 13047 .root_val = decl_val, 13048 .val = decl_val, 13049 .ty = decl.ty, 13050 .byte_offset = 0, 13051 .is_mutable = false, 13052 }; 13053 }, 13054 .decl_ref_mut => { 13055 const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl; 13056 const decl_val = try decl.value(); 13057 if (decl_val.tag() == .variable) return error.RuntimeLoad; 13058 return ComptimePtrLoadKit{ 13059 .root_val = decl_val, 13060 .val = decl_val, 13061 .ty = decl.ty, 13062 .byte_offset = 0, 13063 .is_mutable = true, 13064 }; 13065 }, 13066 .elem_ptr => { 13067 const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; 13068 const parent = try beginComptimePtrLoad(sema, block, src, elem_ptr.array_ptr); 13069 const elem_ty = parent.ty.childType(); 13070 const elem_size = elem_ty.abiSize(target); 13071 return ComptimePtrLoadKit{ 13072 .root_val = parent.root_val, 13073 .val = try parent.val.elemValue(sema.arena, elem_ptr.index), 13074 .ty = elem_ty, 13075 .byte_offset = try sema.usizeCast(block, src, parent.byte_offset + elem_size * elem_ptr.index), 13076 .is_mutable = parent.is_mutable, 13077 }; 13078 }, 13079 .field_ptr => { 13080 const field_ptr = ptr_val.castTag(.field_ptr).?.data; 13081 const parent = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr); 13082 const field_index = @intCast(u32, field_ptr.field_index); 13083 try sema.resolveTypeLayout(block, src, parent.ty); 13084 const field_offset = parent.ty.structFieldOffset(field_index, target); 13085 return ComptimePtrLoadKit{ 13086 .root_val = parent.root_val, 13087 .val = try parent.val.fieldValue(sema.arena, field_index), 13088 .ty = parent.ty.structFieldType(field_index), 13089 .byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset), 13090 .is_mutable = parent.is_mutable, 13091 }; 13092 }, 13093 .eu_payload_ptr => { 13094 const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; 13095 const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr); 13096 return ComptimePtrLoadKit{ 13097 .root_val = parent.root_val, 13098 .val = parent.val.castTag(.eu_payload).?.data, 13099 .ty = parent.ty.errorUnionPayload(), 13100 .byte_offset = undefined, 13101 .is_mutable = parent.is_mutable, 13102 }; 13103 }, 13104 .opt_payload_ptr => { 13105 const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; 13106 const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr); 13107 return ComptimePtrLoadKit{ 13108 .root_val = parent.root_val, 13109 .val = parent.val.castTag(.opt_payload).?.data, 13110 .ty = try parent.ty.optionalChildAlloc(sema.arena), 13111 .byte_offset = undefined, 13112 .is_mutable = parent.is_mutable, 13113 }; 13114 }, 13115 13116 .zero, 13117 .one, 13118 .int_u64, 13119 .int_i64, 13120 .int_big_positive, 13121 .int_big_negative, 13122 .variable, 13123 .extern_fn, 13124 .function, 13125 => return error.RuntimeLoad, 13126 13127 else => unreachable, 13128 } 13129} 13130 13131fn bitCast( 13132 sema: *Sema, 13133 block: *Block, 13134 dest_ty: 
Type, 13135 inst: Air.Inst.Ref, 13136 inst_src: LazySrcLoc, 13137) CompileError!Air.Inst.Ref { 13138 // TODO validate the type size and other compile errors 13139 if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { 13140 const old_ty = sema.typeOf(inst); 13141 const result_val = try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty); 13142 return sema.addConstant(dest_ty, result_val); 13143 } 13144 try sema.requireRuntimeBlock(block, inst_src); 13145 return block.addBitCast(dest_ty, inst); 13146} 13147 13148pub fn bitCastVal( 13149 sema: *Sema, 13150 block: *Block, 13151 src: LazySrcLoc, 13152 val: Value, 13153 old_ty: Type, 13154 new_ty: Type, 13155) !Value { 13156 if (old_ty.eql(new_ty)) return val; 13157 13158 // For types with well-defined memory layouts, we serialize them into a byte buffer, 13159 // then deserialize to the new type. 13160 const target = sema.mod.getTarget(); 13161 const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target)); 13162 const buffer = try sema.gpa.alloc(u8, abi_size); 13163 defer sema.gpa.free(buffer); 13164 val.writeToMemory(old_ty, target, buffer); 13165 return Value.readFromMemory(new_ty, target, buffer, sema.arena); 13166} 13167 13168fn coerceArrayPtrToSlice( 13169 sema: *Sema, 13170 block: *Block, 13171 dest_ty: Type, 13172 inst: Air.Inst.Ref, 13173 inst_src: LazySrcLoc, 13174) CompileError!Air.Inst.Ref { 13175 if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| { 13176 const ptr_array_ty = sema.typeOf(inst); 13177 const array_ty = ptr_array_ty.childType(); 13178 const slice_val = try Value.Tag.slice.create(sema.arena, .{ 13179 .ptr = val, 13180 .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()), 13181 }); 13182 return sema.addConstant(dest_ty, slice_val); 13183 } 13184 try sema.requireRuntimeBlock(block, inst_src); 13185 return block.addTyOp(.array_to_slice, dest_ty, inst); 13186} 13187 13188fn coerceCompatiblePtrs( 13189 sema: *Sema, 13190 block: *Block, 13191 dest_ty: Type, 13192 inst: Air.Inst.Ref, 13193 inst_src: LazySrcLoc, 13194) !Air.Inst.Ref { 13195 if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { 13196 // The comptime Value representation is compatible with both types.
13197 return sema.addConstant(dest_ty, val); 13198 } 13199 try sema.requireRuntimeBlock(block, inst_src); 13200 return sema.bitCast(block, dest_ty, inst, inst_src); 13201} 13202 13203fn coerceEnumToUnion( 13204 sema: *Sema, 13205 block: *Block, 13206 union_ty: Type, 13207 union_ty_src: LazySrcLoc, 13208 inst: Air.Inst.Ref, 13209 inst_src: LazySrcLoc, 13210) !Air.Inst.Ref { 13211 const inst_ty = sema.typeOf(inst); 13212 13213 const tag_ty = union_ty.unionTagType() orelse { 13214 const msg = msg: { 13215 const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{ 13216 union_ty, inst_ty, 13217 }); 13218 errdefer msg.destroy(sema.gpa); 13219 try sema.errNote(block, union_ty_src, msg, "cannot coerce enum to untagged union", .{}); 13220 try sema.addDeclaredHereNote(msg, union_ty); 13221 break :msg msg; 13222 }; 13223 return sema.failWithOwnedErrorMsg(msg); 13224 }; 13225 13226 const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src); 13227 if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| { 13228 const union_obj = union_ty.cast(Type.Payload.Union).?.data; 13229 const field_index = union_obj.tag_ty.enumTagFieldIndex(val) orelse { 13230 const msg = msg: { 13231 const msg = try sema.errMsg(block, inst_src, "union {} has no tag with value {}", .{ 13232 union_ty, val, 13233 }); 13234 errdefer msg.destroy(sema.gpa); 13235 try sema.addDeclaredHereNote(msg, union_ty); 13236 break :msg msg; 13237 }; 13238 return sema.failWithOwnedErrorMsg(msg); 13239 }; 13240 const field = union_obj.fields.values()[field_index]; 13241 const field_ty = try sema.resolveTypeFields(block, inst_src, field.ty); 13242 const opv = (try sema.typeHasOnePossibleValue(block, inst_src, field_ty)) orelse { 13243 // TODO resolve the field names and include in the error message, 13244 // also instead of 'union declared here' make it 'field "foo" declared here'. 13245 const msg = msg: { 13246 const msg = try sema.errMsg(block, inst_src, "coercion to union {} must initialize {} field", .{ 13247 union_ty, field_ty, 13248 }); 13249 errdefer msg.destroy(sema.gpa); 13250 try sema.addDeclaredHereNote(msg, union_ty); 13251 break :msg msg; 13252 }; 13253 return sema.failWithOwnedErrorMsg(msg); 13254 }; 13255 13256 return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ 13257 .tag = val, 13258 .val = opv, 13259 })); 13260 } 13261 13262 try sema.requireRuntimeBlock(block, inst_src); 13263 13264 if (tag_ty.isNonexhaustiveEnum()) { 13265 const msg = msg: { 13266 const msg = try sema.errMsg(block, inst_src, "runtime coercion to union {} from non-exhaustive enum", .{ 13267 union_ty, 13268 }); 13269 errdefer msg.destroy(sema.gpa); 13270 try sema.addDeclaredHereNote(msg, tag_ty); 13271 break :msg msg; 13272 }; 13273 return sema.failWithOwnedErrorMsg(msg); 13274 } 13275 13276 // If the union has all fields 0 bits, the union value is just the enum value. 
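// For example (illustrative): for `union(enum) { a: void, b: void }` the
// runtime representation is only the tag, so a bitcast of the enum value
// suffices.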
13277 if (union_ty.unionHasAllZeroBitFieldTypes()) { 13278 return block.addBitCast(union_ty, enum_tag); 13279 } 13280 13281 // TODO resolve the field names and add a hint that says "field 'foo' has type 'bar'" 13282 // instead of the "union declared here" hint 13283 const msg = msg: { 13284 const msg = try sema.errMsg(block, inst_src, "runtime coercion to union {} which has non-void fields", .{ 13285 union_ty, 13286 }); 13287 errdefer msg.destroy(sema.gpa); 13288 try sema.addDeclaredHereNote(msg, union_ty); 13289 break :msg msg; 13290 }; 13291 return sema.failWithOwnedErrorMsg(msg); 13292} 13293 13294// Coerces vectors/arrays which have the same in-memory layout. This can be used for 13295// both coercing from and to vectors. 13296fn coerceVectorInMemory( 13297 sema: *Sema, 13298 block: *Block, 13299 dest_ty: Type, 13300 dest_ty_src: LazySrcLoc, 13301 inst: Air.Inst.Ref, 13302 inst_src: LazySrcLoc, 13303) !Air.Inst.Ref { 13304 const inst_ty = sema.typeOf(inst); 13305 const inst_len = inst_ty.arrayLen(); 13306 const dest_len = dest_ty.arrayLen(); 13307 13308 if (dest_len != inst_len) { 13309 const msg = msg: { 13310 const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{ 13311 dest_ty, inst_ty, 13312 }); 13313 errdefer msg.destroy(sema.gpa); 13314 try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len}); 13315 try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len}); 13316 break :msg msg; 13317 }; 13318 return sema.failWithOwnedErrorMsg(msg); 13319 } 13320 13321 const target = sema.mod.getTarget(); 13322 const dest_elem_ty = dest_ty.childType(); 13323 const inst_elem_ty = inst_ty.childType(); 13324 const in_memory_result = coerceInMemoryAllowed(dest_elem_ty, inst_elem_ty, false, target); 13325 if (in_memory_result != .ok) { 13326 // TODO recursive error notes for coerceInMemoryAllowed failure 13327 return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty }); 13328 } 13329 13330 if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |inst_val| { 13331 // These types share the same comptime value representation. 
13332 return sema.addConstant(dest_ty, inst_val); 13333 } 13334 13335 try sema.requireRuntimeBlock(block, inst_src); 13336 return block.addBitCast(dest_ty, inst); 13337} 13338 13339fn analyzeDeclVal( 13340 sema: *Sema, 13341 block: *Block, 13342 src: LazySrcLoc, 13343 decl: *Decl, 13344) CompileError!Air.Inst.Ref { 13345 if (sema.decl_val_table.get(decl)) |result| { 13346 return result; 13347 } 13348 const decl_ref = try sema.analyzeDeclRef(decl); 13349 const result = try sema.analyzeLoad(block, src, decl_ref, src); 13350 if (Air.refToIndex(result)) |index| { 13351 if (sema.air_instructions.items(.tag)[index] == .constant) { 13352 try sema.decl_val_table.put(sema.gpa, decl, result); 13353 } 13354 } 13355 return result; 13356} 13357 13358fn ensureDeclAnalyzed(sema: *Sema, decl: *Decl) CompileError!void { 13359 sema.mod.ensureDeclAnalyzed(decl) catch |err| { 13360 if (sema.owner_func) |owner_func| { 13361 owner_func.state = .dependency_failure; 13362 } else { 13363 sema.owner_decl.analysis = .dependency_failure; 13364 } 13365 return err; 13366 }; 13367} 13368 13369fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref { 13370 try sema.mod.declareDeclDependency(sema.owner_decl, decl); 13371 try sema.ensureDeclAnalyzed(decl); 13372 13373 const decl_tv = try decl.typedValue(); 13374 if (decl_tv.val.castTag(.variable)) |payload| { 13375 const variable = payload.data; 13376 const alignment: u32 = if (decl.align_val.tag() == .null_value) 13377 0 13378 else 13379 @intCast(u32, decl.align_val.toUnsignedInt()); 13380 const ty = try Type.ptr(sema.arena, .{ 13381 .pointee_type = decl_tv.ty, 13382 .mutable = variable.is_mutable, 13383 .@"addrspace" = decl.@"addrspace", 13384 .@"align" = alignment, 13385 }); 13386 return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl)); 13387 } 13388 return sema.addConstant( 13389 try Type.ptr(sema.arena, .{ 13390 .pointee_type = decl_tv.ty, 13391 .mutable = false, 13392 .@"addrspace" = decl.@"addrspace", 13393 }), 13394 try Value.Tag.decl_ref.create(sema.arena, decl), 13395 ); 13396} 13397 13398fn analyzeRef( 13399 sema: *Sema, 13400 block: *Block, 13401 src: LazySrcLoc, 13402 operand: Air.Inst.Ref, 13403) CompileError!Air.Inst.Ref { 13404 const operand_ty = sema.typeOf(operand); 13405 13406 if (try sema.resolveMaybeUndefVal(block, src, operand)) |val| { 13407 var anon_decl = try block.startAnonDecl(); 13408 defer anon_decl.deinit(); 13409 return sema.analyzeDeclRef(try anon_decl.finish( 13410 try operand_ty.copy(anon_decl.arena()), 13411 try val.copy(anon_decl.arena()), 13412 )); 13413 } 13414 13415 try sema.requireRuntimeBlock(block, src); 13416 const address_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local); 13417 const ptr_type = try Type.ptr(sema.arena, .{ 13418 .pointee_type = operand_ty, 13419 .mutable = false, 13420 .@"addrspace" = address_space, 13421 }); 13422 const mut_ptr_type = try Type.ptr(sema.arena, .{ 13423 .pointee_type = operand_ty, 13424 .@"addrspace" = address_space, 13425 }); 13426 const alloc = try block.addTy(.alloc, mut_ptr_type); 13427 try sema.storePtr(block, src, alloc, operand); 13428 13429 // TODO: Replace with sema.coerce when that supports adding pointer constness. 
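// For now, bit-cast the mutable pointer produced by the alloc to the const
// pointer type of the result.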
13430 return sema.bitCast(block, ptr_type, alloc, src); 13431} 13432 13433fn analyzeLoad( 13434 sema: *Sema, 13435 block: *Block, 13436 src: LazySrcLoc, 13437 ptr: Air.Inst.Ref, 13438 ptr_src: LazySrcLoc, 13439) CompileError!Air.Inst.Ref { 13440 const ptr_ty = sema.typeOf(ptr); 13441 const elem_ty = switch (ptr_ty.zigTypeTag()) { 13442 .Pointer => ptr_ty.childType(), 13443 else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}), 13444 }; 13445 if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { 13446 if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| { 13447 return sema.addConstant(elem_ty, elem_val); 13448 } 13449 } 13450 13451 try sema.requireRuntimeBlock(block, src); 13452 return block.addTyOp(.load, elem_ty, ptr); 13453} 13454 13455fn analyzeSlicePtr( 13456 sema: *Sema, 13457 block: *Block, 13458 src: LazySrcLoc, 13459 slice: Air.Inst.Ref, 13460 slice_ty: Type, 13461 slice_src: LazySrcLoc, 13462) CompileError!Air.Inst.Ref { 13463 const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); 13464 const result_ty = slice_ty.slicePtrFieldType(buf); 13465 13466 if (try sema.resolveMaybeUndefVal(block, slice_src, slice)) |val| { 13467 if (val.isUndef()) return sema.addConstUndef(result_ty); 13468 return sema.addConstant(result_ty, val.slicePtr()); 13469 } 13470 try sema.requireRuntimeBlock(block, src); 13471 return block.addTyOp(.slice_ptr, result_ty, slice); 13472} 13473 13474fn analyzeSliceLen( 13475 sema: *Sema, 13476 block: *Block, 13477 src: LazySrcLoc, 13478 slice_inst: Air.Inst.Ref, 13479) CompileError!Air.Inst.Ref { 13480 if (try sema.resolveMaybeUndefVal(block, src, slice_inst)) |slice_val| { 13481 if (slice_val.isUndef()) { 13482 return sema.addConstUndef(Type.usize); 13483 } 13484 return sema.addIntUnsigned(Type.usize, slice_val.sliceLen()); 13485 } 13486 try sema.requireRuntimeBlock(block, src); 13487 return block.addTyOp(.slice_len, Type.usize, slice_inst); 13488} 13489 13490fn analyzeIsNull( 13491 sema: *Sema, 13492 block: *Block, 13493 src: LazySrcLoc, 13494 operand: Air.Inst.Ref, 13495 invert_logic: bool, 13496) CompileError!Air.Inst.Ref { 13497 const result_ty = Type.initTag(.bool); 13498 if (try sema.resolveMaybeUndefVal(block, src, operand)) |opt_val| { 13499 if (opt_val.isUndef()) { 13500 return sema.addConstUndef(result_ty); 13501 } 13502 const is_null = opt_val.isNull(); 13503 const bool_value = if (invert_logic) !is_null else is_null; 13504 if (bool_value) { 13505 return Air.Inst.Ref.bool_true; 13506 } else { 13507 return Air.Inst.Ref.bool_false; 13508 } 13509 } 13510 try sema.requireRuntimeBlock(block, src); 13511 const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null; 13512 return block.addUnOp(air_tag, operand); 13513} 13514 13515fn analyzeIsNonErr( 13516 sema: *Sema, 13517 block: *Block, 13518 src: LazySrcLoc, 13519 operand: Air.Inst.Ref, 13520) CompileError!Air.Inst.Ref { 13521 const operand_ty = sema.typeOf(operand); 13522 const ot = operand_ty.zigTypeTag(); 13523 if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; 13524 if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; 13525 assert(ot == .ErrorUnion); 13526 const result_ty = Type.initTag(.bool); 13527 if (try sema.resolveMaybeUndefVal(block, src, operand)) |err_union| { 13528 if (err_union.isUndef()) { 13529 return sema.addConstUndef(result_ty); 13530 } 13531 if (err_union.getError() == null) { 13532 return Air.Inst.Ref.bool_true; 13533 } else { 13534 return Air.Inst.Ref.bool_false; 13535 } 13536 } 
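// The error union value is not comptime-known; emit a runtime is_non_err check.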
13537 try sema.requireRuntimeBlock(block, src); 13538 return block.addUnOp(.is_non_err, operand); 13539} 13540 13541fn analyzeSlice( 13542 sema: *Sema, 13543 block: *Block, 13544 src: LazySrcLoc, 13545 ptr_ptr: Air.Inst.Ref, 13546 uncasted_start: Air.Inst.Ref, 13547 uncasted_end_opt: Air.Inst.Ref, 13548 sentinel_opt: Air.Inst.Ref, 13549 sentinel_src: LazySrcLoc, 13550) CompileError!Air.Inst.Ref { 13551 const ptr_src = src; // TODO better source location 13552 const start_src = src; // TODO better source location 13553 const end_src = src; // TODO better source location 13554 // Slice expressions can operate on a variable whose type is an array. This requires 13555 // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. 13556 const ptr_ptr_ty = sema.typeOf(ptr_ptr); 13557 const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) { 13558 .Pointer => ptr_ptr_ty.elemType(), 13559 else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty}), 13560 }; 13561 13562 var array_ty = ptr_ptr_child_ty; 13563 var slice_ty = ptr_ptr_ty; 13564 var ptr_or_slice = ptr_ptr; 13565 var elem_ty = ptr_ptr_child_ty.childType(); 13566 switch (ptr_ptr_child_ty.zigTypeTag()) { 13567 .Array => {}, 13568 .Pointer => switch (ptr_ptr_child_ty.ptrSize()) { 13569 .One => { 13570 const double_child_ty = ptr_ptr_child_ty.childType(); 13571 if (double_child_ty.zigTypeTag() == .Array) { 13572 ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); 13573 slice_ty = ptr_ptr_child_ty; 13574 array_ty = double_child_ty; 13575 elem_ty = double_child_ty.childType(); 13576 } else { 13577 return sema.fail(block, ptr_src, "slice of single-item pointer", .{}); 13578 } 13579 }, 13580 .Many, .C => { 13581 ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); 13582 slice_ty = ptr_ptr_child_ty; 13583 array_ty = ptr_ptr_child_ty; 13584 elem_ty = ptr_ptr_child_ty.childType(); 13585 }, 13586 .Slice => { 13587 ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); 13588 slice_ty = ptr_ptr_child_ty; 13589 array_ty = ptr_ptr_child_ty; 13590 elem_ty = ptr_ptr_child_ty.childType(); 13591 }, 13592 }, 13593 else => return sema.fail(block, ptr_src, "slice of non-array type '{}'", .{ptr_ptr_child_ty}), 13594 } 13595 13596 const ptr = if (slice_ty.isSlice()) 13597 try sema.analyzeSlicePtr(block, src, ptr_or_slice, slice_ty, ptr_src) 13598 else 13599 ptr_or_slice; 13600 13601 const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); 13602 const new_ptr = try analyzePtrArithmetic(sema, block, src, ptr, start, .ptr_add, ptr_src, start_src); 13603 13604 const end = e: { 13605 if (uncasted_end_opt != .none) { 13606 break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); 13607 } 13608 13609 if (array_ty.zigTypeTag() == .Array) { 13610 break :e try sema.addConstant( 13611 Type.usize, 13612 try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()), 13613 ); 13614 } else if (slice_ty.isSlice()) { 13615 break :e try sema.analyzeSliceLen(block, src, ptr_or_slice); 13616 } 13617 return sema.fail(block, end_src, "slice of pointer must include end value", .{}); 13618 }; 13619 13620 const slice_sentinel = if (sentinel_opt != .none) blk: { 13621 const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src); 13622 break :blk try sema.resolveConstValue(block, sentinel_src, casted); 13623 } else null; 13624 13625 const new_len = try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src); 13626 13627 
const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); 13628 13629 const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data; 13630 const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; 13631 13632 if (opt_new_len_val) |new_len_val| { 13633 const new_len_int = new_len_val.toUnsignedInt(); 13634 13635 const sentinel = if (array_ty.zigTypeTag() == .Array and new_len_int == array_ty.arrayLen()) 13636 array_ty.sentinel() 13637 else 13638 slice_sentinel; 13639 13640 const return_ty = try Type.ptr(sema.arena, .{ 13641 .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty), 13642 .sentinel = null, 13643 .@"align" = new_ptr_ty_info.@"align", 13644 .@"addrspace" = new_ptr_ty_info.@"addrspace", 13645 .mutable = new_ptr_ty_info.mutable, 13646 .@"allowzero" = new_allowzero, 13647 .@"volatile" = new_ptr_ty_info.@"volatile", 13648 .size = .One, 13649 }); 13650 13651 const opt_new_ptr_val = try sema.resolveMaybeUndefVal(block, ptr_src, new_ptr); 13652 const new_ptr_val = opt_new_ptr_val orelse { 13653 return block.addBitCast(return_ty, new_ptr); 13654 }; 13655 13656 if (!new_ptr_val.isUndef()) { 13657 return sema.addConstant(return_ty, new_ptr_val); 13658 } 13659 13660 // Special case: @as([]i32, undefined)[x..x] 13661 if (new_len_int == 0) { 13662 return sema.addConstUndef(return_ty); 13663 } 13664 13665 return sema.fail(block, ptr_src, "non-zero length slice of undefined pointer", .{}); 13666 } 13667 13668 const return_ty = try Type.ptr(sema.arena, .{ 13669 .pointee_type = elem_ty, 13670 .sentinel = slice_sentinel, 13671 .@"align" = new_ptr_ty_info.@"align", 13672 .@"addrspace" = new_ptr_ty_info.@"addrspace", 13673 .mutable = new_ptr_ty_info.mutable, 13674 .@"allowzero" = new_allowzero, 13675 .@"volatile" = new_ptr_ty_info.@"volatile", 13676 .size = .Slice, 13677 }); 13678 13679 try sema.requireRuntimeBlock(block, src); 13680 return block.addInst(.{ 13681 .tag = .slice, 13682 .data = .{ .ty_pl = .{ 13683 .ty = try sema.addType(return_ty), 13684 .payload = try sema.addExtra(Air.Bin{ 13685 .lhs = new_ptr, 13686 .rhs = new_len, 13687 }), 13688 } }, 13689 }); 13690} 13691 13692/// Asserts that lhs and rhs types are both numeric. 
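/// Comptime-known operands are compared at compile time; otherwise both
/// operands are coerced to a common type and a runtime comparison is emitted.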
13693fn cmpNumeric( 13694 sema: *Sema, 13695 block: *Block, 13696 src: LazySrcLoc, 13697 lhs: Air.Inst.Ref, 13698 rhs: Air.Inst.Ref, 13699 op: std.math.CompareOperator, 13700 lhs_src: LazySrcLoc, 13701 rhs_src: LazySrcLoc, 13702) CompileError!Air.Inst.Ref { 13703 const lhs_ty = sema.typeOf(lhs); 13704 const rhs_ty = sema.typeOf(rhs); 13705 13706 assert(lhs_ty.isNumeric()); 13707 assert(rhs_ty.isNumeric()); 13708 13709 const lhs_ty_tag = lhs_ty.zigTypeTag(); 13710 const rhs_ty_tag = rhs_ty.zigTypeTag(); 13711 13712 if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) { 13713 if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { 13714 return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{ 13715 lhs_ty.arrayLen(), rhs_ty.arrayLen(), 13716 }); 13717 } 13718 return sema.fail(block, src, "TODO implement support for vectors in cmpNumeric", .{}); 13719 } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) { 13720 return sema.fail(block, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{ 13721 lhs_ty, rhs_ty, 13722 }); 13723 } 13724 13725 const runtime_src: LazySrcLoc = src: { 13726 if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { 13727 if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { 13728 if (lhs_val.isUndef() or rhs_val.isUndef()) { 13729 return sema.addConstUndef(Type.initTag(.bool)); 13730 } 13731 if (Value.compareHetero(lhs_val, op, rhs_val)) { 13732 return Air.Inst.Ref.bool_true; 13733 } else { 13734 return Air.Inst.Ref.bool_false; 13735 } 13736 } else { 13737 break :src rhs_src; 13738 } 13739 } else { 13740 break :src lhs_src; 13741 } 13742 }; 13743 13744 // TODO handle comparisons against lazy zero values 13745 // Some values can be compared against zero without being runtime known or without forcing 13746 // a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to 13747 // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout 13748 // of this function if we don't need to. 13749 try sema.requireRuntimeBlock(block, runtime_src); 13750 13751 // For floats, emit a float comparison instruction. 13752 const lhs_is_float = switch (lhs_ty_tag) { 13753 .Float, .ComptimeFloat => true, 13754 else => false, 13755 }; 13756 const rhs_is_float = switch (rhs_ty_tag) { 13757 .Float, .ComptimeFloat => true, 13758 else => false, 13759 }; 13760 const target = sema.mod.getTarget(); 13761 if (lhs_is_float and rhs_is_float) { 13762 // Implicit cast the smaller one to the larger one. 13763 const dest_ty = x: { 13764 if (lhs_ty_tag == .ComptimeFloat) { 13765 break :x rhs_ty; 13766 } else if (rhs_ty_tag == .ComptimeFloat) { 13767 break :x lhs_ty; 13768 } 13769 if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) { 13770 break :x lhs_ty; 13771 } else { 13772 break :x rhs_ty; 13773 } 13774 }; 13775 const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); 13776 const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); 13777 return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); 13778 } 13779 // For mixed unsigned integer sizes, implicit cast both operands to the larger integer. 13780 // For mixed signed and unsigned integers, implicit cast both operands to a signed 13781 // integer with + 1 bit. 13782 // For mixed floats and integers, extract the integer part from the float, cast that to 13783 // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, 13784 // add/subtract 1. 
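// For example (illustrative, runtime-known operands): comparing a `u8` with an
// `i16` compares both as `i16`, while comparing a `u16` with an `i16` requires
// one extra bit of signed range and compares both as `i17`.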
13785 const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| 13786 lhs_val.compareWithZero(.lt) 13787 else 13788 (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt()); 13789 const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| 13790 rhs_val.compareWithZero(.lt) 13791 else 13792 (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt()); 13793 const dest_int_is_signed = lhs_is_signed or rhs_is_signed; 13794 13795 var dest_float_type: ?Type = null; 13796 13797 var lhs_bits: usize = undefined; 13798 if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { 13799 if (lhs_val.isUndef()) 13800 return sema.addConstUndef(Type.initTag(.bool)); 13801 const is_unsigned = if (lhs_is_float) x: { 13802 var bigint_space: Value.BigIntSpace = undefined; 13803 var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(sema.gpa); 13804 defer bigint.deinit(); 13805 const zcmp = lhs_val.orderAgainstZero(); 13806 if (lhs_val.floatHasFraction()) { 13807 switch (op) { 13808 .eq => return Air.Inst.Ref.bool_false, 13809 .neq => return Air.Inst.Ref.bool_true, 13810 else => {}, 13811 } 13812 if (zcmp == .lt) { 13813 try bigint.addScalar(bigint.toConst(), -1); 13814 } else { 13815 try bigint.addScalar(bigint.toConst(), 1); 13816 } 13817 } 13818 lhs_bits = bigint.toConst().bitCountTwosComp(); 13819 break :x (zcmp != .lt); 13820 } else x: { 13821 lhs_bits = lhs_val.intBitCountTwosComp(); 13822 break :x (lhs_val.orderAgainstZero() != .lt); 13823 }; 13824 lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed); 13825 } else if (lhs_is_float) { 13826 dest_float_type = lhs_ty; 13827 } else { 13828 const int_info = lhs_ty.intInfo(target); 13829 lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); 13830 } 13831 13832 var rhs_bits: usize = undefined; 13833 if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { 13834 if (rhs_val.isUndef()) 13835 return sema.addConstUndef(Type.initTag(.bool)); 13836 const is_unsigned = if (rhs_is_float) x: { 13837 var bigint_space: Value.BigIntSpace = undefined; 13838 var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(sema.gpa); 13839 defer bigint.deinit(); 13840 const zcmp = rhs_val.orderAgainstZero(); 13841 if (rhs_val.floatHasFraction()) { 13842 switch (op) { 13843 .eq => return Air.Inst.Ref.bool_false, 13844 .neq => return Air.Inst.Ref.bool_true, 13845 else => {}, 13846 } 13847 if (zcmp == .lt) { 13848 try bigint.addScalar(bigint.toConst(), -1); 13849 } else { 13850 try bigint.addScalar(bigint.toConst(), 1); 13851 } 13852 } 13853 rhs_bits = bigint.toConst().bitCountTwosComp(); 13854 break :x (zcmp != .lt); 13855 } else x: { 13856 rhs_bits = rhs_val.intBitCountTwosComp(); 13857 break :x (rhs_val.orderAgainstZero() != .lt); 13858 }; 13859 rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed); 13860 } else if (rhs_is_float) { 13861 dest_float_type = rhs_ty; 13862 } else { 13863 const int_info = rhs_ty.intInfo(target); 13864 rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); 13865 } 13866 13867 const dest_ty = if (dest_float_type) |ft| ft else blk: { 13868 const max_bits = std.math.max(lhs_bits, rhs_bits); 13869 const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) { 13870 error.Overflow => return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}), 13871 }; 13872 const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; 13873 break :blk try 
Module.makeIntType(sema.arena, signedness, casted_bits); 13874 }; 13875 const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); 13876 const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); 13877 13878 return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); 13879} 13880 13881fn wrapOptional( 13882 sema: *Sema, 13883 block: *Block, 13884 dest_ty: Type, 13885 inst: Air.Inst.Ref, 13886 inst_src: LazySrcLoc, 13887) !Air.Inst.Ref { 13888 if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { 13889 return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, val)); 13890 } 13891 13892 try sema.requireRuntimeBlock(block, inst_src); 13893 return block.addTyOp(.wrap_optional, dest_ty, inst); 13894} 13895 13896fn wrapErrorUnion( 13897 sema: *Sema, 13898 block: *Block, 13899 dest_ty: Type, 13900 inst: Air.Inst.Ref, 13901 inst_src: LazySrcLoc, 13902) !Air.Inst.Ref { 13903 const inst_ty = sema.typeOf(inst); 13904 const dest_err_set_ty = dest_ty.errorUnionSet(); 13905 const dest_payload_ty = dest_ty.errorUnionPayload(); 13906 if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { 13907 if (inst_ty.zigTypeTag() != .ErrorSet) { 13908 _ = try sema.coerce(block, dest_payload_ty, inst, inst_src); 13909 return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val)); 13910 } 13911 switch (dest_err_set_ty.tag()) { 13912 .anyerror => {}, 13913 .error_set_single => ok: { 13914 const expected_name = val.castTag(.@"error").?.data.name; 13915 const n = dest_err_set_ty.castTag(.error_set_single).?.data; 13916 if (mem.eql(u8, expected_name, n)) break :ok; 13917 return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); 13918 }, 13919 .error_set => ok: { 13920 const expected_name = val.castTag(.@"error").?.data.name; 13921 const error_set = dest_err_set_ty.castTag(.error_set).?.data; 13922 const names = error_set.names_ptr[0..error_set.names_len]; 13923 // TODO this is O(N). I'm putting off solving this until we solve inferred 13924 // error sets at the same time. 
13925 for (names) |name| { 13926 if (mem.eql(u8, expected_name, name)) break :ok; 13927 } 13928 return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); 13929 }, 13930 .error_set_inferred => ok: { 13931 const err_set_payload = dest_err_set_ty.castTag(.error_set_inferred).?.data; 13932 if (err_set_payload.is_anyerror) break :ok; 13933 const expected_name = val.castTag(.@"error").?.data.name; 13934 if (err_set_payload.map.contains(expected_name)) break :ok; 13935 // TODO error set resolution here before emitting a compile error 13936 return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); 13937 }, 13938 else => unreachable, 13939 } 13940 return sema.addConstant(dest_ty, val); 13941 } 13942 13943 try sema.requireRuntimeBlock(block, inst_src); 13944 13945 // we are coercing from E to E!T 13946 if (inst_ty.zigTypeTag() == .ErrorSet) { 13947 var coerced = try sema.coerce(block, dest_err_set_ty, inst, inst_src); 13948 return block.addTyOp(.wrap_errunion_err, dest_ty, coerced); 13949 } else { 13950 var coerced = try sema.coerce(block, dest_payload_ty, inst, inst_src); 13951 return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced); 13952 } 13953} 13954 13955fn unionToTag( 13956 sema: *Sema, 13957 block: *Block, 13958 enum_ty: Type, 13959 un: Air.Inst.Ref, 13960 un_src: LazySrcLoc, 13961) !Air.Inst.Ref { 13962 if ((try sema.typeHasOnePossibleValue(block, un_src, enum_ty))) |opv| { 13963 return sema.addConstant(enum_ty, opv); 13964 } 13965 if (try sema.resolveMaybeUndefVal(block, un_src, un)) |un_val| { 13966 return sema.addConstant(enum_ty, un_val.unionTag()); 13967 } 13968 try sema.requireRuntimeBlock(block, un_src); 13969 return block.addTyOp(.get_union_tag, enum_ty, un); 13970} 13971 13972fn resolvePeerTypes( 13973 sema: *Sema, 13974 block: *Block, 13975 src: LazySrcLoc, 13976 instructions: []Air.Inst.Ref, 13977 candidate_srcs: Module.PeerTypeCandidateSrc, 13978) !Type { 13979 if (instructions.len == 0) 13980 return Type.initTag(.noreturn); 13981 13982 if (instructions.len == 1) 13983 return sema.typeOf(instructions[0]); 13984 13985 const target = sema.mod.getTarget(); 13986 13987 var chosen = instructions[0]; 13988 var any_are_null = false; 13989 var chosen_i: usize = 0; 13990 for (instructions[1..]) |candidate, candidate_i| { 13991 const candidate_ty = sema.typeOf(candidate); 13992 const chosen_ty = sema.typeOf(chosen); 13993 if (candidate_ty.eql(chosen_ty)) 13994 continue; 13995 const candidate_ty_tag = candidate_ty.zigTypeTag(); 13996 const chosen_ty_tag = chosen_ty.zigTypeTag(); 13997 13998 switch (candidate_ty_tag) { 13999 .NoReturn, .Undefined => continue, 14000 14001 .Null => { 14002 any_are_null = true; 14003 continue; 14004 }, 14005 14006 .Int => switch (chosen_ty_tag) { 14007 .ComptimeInt => { 14008 chosen = candidate; 14009 chosen_i = candidate_i + 1; 14010 continue; 14011 }, 14012 .Int => { 14013 if (chosen_ty.isSignedInt() == candidate_ty.isSignedInt()) { 14014 if (chosen_ty.intInfo(target).bits < candidate_ty.intInfo(target).bits) { 14015 chosen = candidate; 14016 chosen_i = candidate_i + 1; 14017 } 14018 continue; 14019 } 14020 }, 14021 .Pointer => if (chosen_ty.ptrSize() == .C) continue, 14022 else => {}, 14023 }, 14024 .ComptimeInt => switch (chosen_ty_tag) { 14025 .Int, .Float, .ComptimeFloat => continue, 14026 .Pointer => if (chosen_ty.ptrSize() == .C) continue, 14027 else => {}, 14028 }, 14029 .Float => switch (chosen_ty_tag) { 14030 .Float => { 14031 if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) 
{ 14032 chosen = candidate; 14033 chosen_i = candidate_i + 1; 14034 } 14035 continue; 14036 }, 14037 .ComptimeFloat, .ComptimeInt => { 14038 chosen = candidate; 14039 chosen_i = candidate_i + 1; 14040 continue; 14041 }, 14042 else => {}, 14043 }, 14044 .ComptimeFloat => switch (chosen_ty_tag) { 14045 .Float => continue, 14046 .ComptimeInt => { 14047 chosen = candidate; 14048 chosen_i = candidate_i + 1; 14049 continue; 14050 }, 14051 else => {}, 14052 }, 14053 .Enum => switch (chosen_ty_tag) { 14054 .EnumLiteral => { 14055 chosen = candidate; 14056 chosen_i = candidate_i + 1; 14057 continue; 14058 }, 14059 else => {}, 14060 }, 14061 .EnumLiteral => switch (chosen_ty_tag) { 14062 .Enum => continue, 14063 else => {}, 14064 }, 14065 .Pointer => { 14066 if (candidate_ty.ptrSize() == .C) { 14067 if (chosen_ty_tag == .Int or chosen_ty_tag == .ComptimeInt) { 14068 chosen = candidate; 14069 chosen_i = candidate_i + 1; 14070 continue; 14071 } 14072 if (chosen_ty_tag == .Pointer and chosen_ty.ptrSize() != .Slice) { 14073 continue; 14074 } 14075 } 14076 }, 14077 .Optional => { 14078 var opt_child_buf: Type.Payload.ElemType = undefined; 14079 const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf); 14080 if (coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target) == .ok) { 14081 chosen = candidate; 14082 chosen_i = candidate_i + 1; 14083 continue; 14084 } 14085 if (coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target) == .ok) { 14086 any_are_null = true; 14087 continue; 14088 } 14089 }, 14090 else => {}, 14091 } 14092 14093 switch (chosen_ty_tag) { 14094 .NoReturn, .Undefined => { 14095 chosen = candidate; 14096 chosen_i = candidate_i + 1; 14097 continue; 14098 }, 14099 .Null => { 14100 any_are_null = true; 14101 chosen = candidate; 14102 chosen_i = candidate_i + 1; 14103 continue; 14104 }, 14105 .Optional => { 14106 var opt_child_buf: Type.Payload.ElemType = undefined; 14107 const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf); 14108 if (coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target) == .ok) { 14109 continue; 14110 } 14111 if (coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target) == .ok) { 14112 any_are_null = true; 14113 chosen = candidate; 14114 chosen_i = candidate_i + 1; 14115 continue; 14116 } 14117 }, 14118 else => {}, 14119 } 14120 14121 // At this point, we hit a compile error. We need to recover 14122 // the source locations. 
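        // Neither operand could absorb the other in the switches above, so resolve a
        // source location for `chosen` and for `candidate` and attach each as a note
        // to the incompatible-types error emitted below.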
14123 const chosen_src = candidate_srcs.resolve( 14124 sema.gpa, 14125 block.src_decl, 14126 chosen_i, 14127 ); 14128 const candidate_src = candidate_srcs.resolve( 14129 sema.gpa, 14130 block.src_decl, 14131 candidate_i + 1, 14132 ); 14133 14134 const msg = msg: { 14135 const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{ chosen_ty, candidate_ty }); 14136 errdefer msg.destroy(sema.gpa); 14137 14138 if (chosen_src) |src_loc| 14139 try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty}); 14140 14141 if (candidate_src) |src_loc| 14142 try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty}); 14143 14144 break :msg msg; 14145 }; 14146 return sema.failWithOwnedErrorMsg(msg); 14147 } 14148 14149 const chosen_ty = sema.typeOf(chosen); 14150 14151 if (any_are_null) { 14152 switch (chosen_ty.zigTypeTag()) { 14153 .Null, .Optional => return chosen_ty, 14154 else => return Type.optional(sema.arena, chosen_ty), 14155 } 14156 } 14157 14158 return chosen_ty; 14159} 14160 14161pub fn resolveTypeLayout( 14162 sema: *Sema, 14163 block: *Block, 14164 src: LazySrcLoc, 14165 ty: Type, 14166) CompileError!void { 14167 switch (ty.zigTypeTag()) { 14168 .Struct => { 14169 const resolved_ty = try sema.resolveTypeFields(block, src, ty); 14170 const struct_obj = resolved_ty.castTag(.@"struct").?.data; 14171 switch (struct_obj.status) { 14172 .none, .have_field_types => {}, 14173 .field_types_wip, .layout_wip => { 14174 return sema.fail(block, src, "struct {} depends on itself", .{ty}); 14175 }, 14176 .have_layout => return, 14177 } 14178 struct_obj.status = .layout_wip; 14179 for (struct_obj.fields.values()) |field| { 14180 try sema.resolveTypeLayout(block, src, field.ty); 14181 } 14182 struct_obj.status = .have_layout; 14183 }, 14184 .Union => { 14185 const resolved_ty = try sema.resolveTypeFields(block, src, ty); 14186 const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; 14187 switch (union_obj.status) { 14188 .none, .have_field_types => {}, 14189 .field_types_wip, .layout_wip => { 14190 return sema.fail(block, src, "union {} depends on itself", .{ty}); 14191 }, 14192 .have_layout => return, 14193 } 14194 union_obj.status = .layout_wip; 14195 for (union_obj.fields.values()) |field| { 14196 try sema.resolveTypeLayout(block, src, field.ty); 14197 } 14198 union_obj.status = .have_layout; 14199 }, 14200 .Array => { 14201 const elem_ty = ty.childType(); 14202 return sema.resolveTypeLayout(block, src, elem_ty); 14203 }, 14204 .Optional => { 14205 var buf: Type.Payload.ElemType = undefined; 14206 const payload_ty = ty.optionalChild(&buf); 14207 return sema.resolveTypeLayout(block, src, payload_ty); 14208 }, 14209 .ErrorUnion => { 14210 const payload_ty = ty.errorUnionPayload(); 14211 return sema.resolveTypeLayout(block, src, payload_ty); 14212 }, 14213 else => {}, 14214 } 14215} 14216 14217fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!Type { 14218 switch (ty.tag()) { 14219 .@"struct" => { 14220 const struct_obj = ty.castTag(.@"struct").?.data; 14221 switch (struct_obj.status) { 14222 .none => {}, 14223 .field_types_wip => { 14224 return sema.fail(block, src, "struct {} depends on itself", .{ty}); 14225 }, 14226 .have_field_types, .have_layout, .layout_wip => return ty, 14227 } 14228 14229 struct_obj.status = .field_types_wip; 14230 try semaStructFields(sema.mod, struct_obj); 14231 struct_obj.status = .have_field_types; 14232 14233 return ty; 14234 }, 14235 .type_info => return sema.resolveBuiltinTypeFields(block, 
src, "TypeInfo"), 14236 .extern_options => return sema.resolveBuiltinTypeFields(block, src, "ExternOptions"), 14237 .export_options => return sema.resolveBuiltinTypeFields(block, src, "ExportOptions"), 14238 .atomic_order => return sema.resolveBuiltinTypeFields(block, src, "AtomicOrder"), 14239 .atomic_rmw_op => return sema.resolveBuiltinTypeFields(block, src, "AtomicRmwOp"), 14240 .calling_convention => return sema.resolveBuiltinTypeFields(block, src, "CallingConvention"), 14241 .address_space => return sema.resolveBuiltinTypeFields(block, src, "AddressSpace"), 14242 .float_mode => return sema.resolveBuiltinTypeFields(block, src, "FloatMode"), 14243 .reduce_op => return sema.resolveBuiltinTypeFields(block, src, "ReduceOp"), 14244 .call_options => return sema.resolveBuiltinTypeFields(block, src, "CallOptions"), 14245 .prefetch_options => return sema.resolveBuiltinTypeFields(block, src, "PrefetchOptions"), 14246 14247 .@"union", .union_tagged => { 14248 const union_obj = ty.cast(Type.Payload.Union).?.data; 14249 switch (union_obj.status) { 14250 .none => {}, 14251 .field_types_wip => { 14252 return sema.fail(block, src, "union {} depends on itself", .{ty}); 14253 }, 14254 .have_field_types, .have_layout, .layout_wip => return ty, 14255 } 14256 14257 union_obj.status = .field_types_wip; 14258 try semaUnionFields(sema.mod, union_obj); 14259 union_obj.status = .have_field_types; 14260 14261 return ty; 14262 }, 14263 else => return ty, 14264 } 14265} 14266 14267fn resolveBuiltinTypeFields( 14268 sema: *Sema, 14269 block: *Block, 14270 src: LazySrcLoc, 14271 name: []const u8, 14272) CompileError!Type { 14273 const resolved_ty = try sema.getBuiltinType(block, src, name); 14274 return sema.resolveTypeFields(block, src, resolved_ty); 14275} 14276 14277fn semaStructFields( 14278 mod: *Module, 14279 struct_obj: *Module.Struct, 14280) CompileError!void { 14281 const tracy = trace(@src()); 14282 defer tracy.end(); 14283 14284 const gpa = mod.gpa; 14285 const decl = struct_obj.owner_decl; 14286 const zir = struct_obj.namespace.file_scope.zir; 14287 const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; 14288 assert(extended.opcode == .struct_decl); 14289 const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); 14290 var extra_index: usize = extended.operand; 14291 14292 const src: LazySrcLoc = .{ .node_offset = struct_obj.node_offset }; 14293 extra_index += @boolToInt(small.has_src_node); 14294 14295 const body_len = if (small.has_body_len) blk: { 14296 const body_len = zir.extra[extra_index]; 14297 extra_index += 1; 14298 break :blk body_len; 14299 } else 0; 14300 14301 const fields_len = if (small.has_fields_len) blk: { 14302 const fields_len = zir.extra[extra_index]; 14303 extra_index += 1; 14304 break :blk fields_len; 14305 } else 0; 14306 14307 const decls_len = if (small.has_decls_len) decls_len: { 14308 const decls_len = zir.extra[extra_index]; 14309 extra_index += 1; 14310 break :decls_len decls_len; 14311 } else 0; 14312 14313 // Skip over decls. 
14314 var decls_it = zir.declIteratorInner(extra_index, decls_len); 14315 while (decls_it.next()) |_| {} 14316 extra_index = decls_it.extra_index; 14317 14318 const body = zir.extra[extra_index..][0..body_len]; 14319 if (fields_len == 0) { 14320 assert(body.len == 0); 14321 return; 14322 } 14323 extra_index += body.len; 14324 14325 var decl_arena = decl.value_arena.?.promote(gpa); 14326 defer decl.value_arena.?.* = decl_arena.state; 14327 const decl_arena_allocator = decl_arena.allocator(); 14328 14329 var analysis_arena = std.heap.ArenaAllocator.init(gpa); 14330 defer analysis_arena.deinit(); 14331 14332 var sema: Sema = .{ 14333 .mod = mod, 14334 .gpa = gpa, 14335 .arena = analysis_arena.allocator(), 14336 .perm_arena = decl_arena_allocator, 14337 .code = zir, 14338 .owner_decl = decl, 14339 .func = null, 14340 .fn_ret_ty = Type.void, 14341 .owner_func = null, 14342 }; 14343 defer sema.deinit(); 14344 14345 var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); 14346 defer wip_captures.deinit(); 14347 14348 var block_scope: Block = .{ 14349 .parent = null, 14350 .sema = &sema, 14351 .src_decl = decl, 14352 .namespace = &struct_obj.namespace, 14353 .wip_capture_scope = wip_captures.scope, 14354 .instructions = .{}, 14355 .inlining = null, 14356 .is_comptime = true, 14357 }; 14358 defer { 14359 assert(block_scope.instructions.items.len == 0); 14360 block_scope.params.deinit(gpa); 14361 } 14362 14363 if (body.len != 0) { 14364 _ = try sema.analyzeBody(&block_scope, body); 14365 } 14366 14367 try wip_captures.finalize(); 14368 14369 try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); 14370 14371 const bits_per_field = 4; 14372 const fields_per_u32 = 32 / bits_per_field; 14373 const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; 14374 var bit_bag_index: usize = extra_index; 14375 extra_index += bit_bags_count; 14376 var cur_bit_bag: u32 = undefined; 14377 var field_i: u32 = 0; 14378 while (field_i < fields_len) : (field_i += 1) { 14379 if (field_i % fields_per_u32 == 0) { 14380 cur_bit_bag = zir.extra[bit_bag_index]; 14381 bit_bag_index += 1; 14382 } 14383 const has_align = @truncate(u1, cur_bit_bag) != 0; 14384 cur_bit_bag >>= 1; 14385 const has_default = @truncate(u1, cur_bit_bag) != 0; 14386 cur_bit_bag >>= 1; 14387 const is_comptime = @truncate(u1, cur_bit_bag) != 0; 14388 cur_bit_bag >>= 1; 14389 const unused = @truncate(u1, cur_bit_bag) != 0; 14390 cur_bit_bag >>= 1; 14391 14392 _ = unused; 14393 14394 const field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]); 14395 extra_index += 1; 14396 const field_type_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); 14397 extra_index += 1; 14398 14399 // This string needs to outlive the ZIR code. 14400 const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); 14401 const field_ty: Type = if (field_type_ref == .none) 14402 Type.initTag(.noreturn) 14403 else 14404 // TODO: if we need to report an error here, use a source location 14405 // that points to this type expression rather than the struct. 14406 // But only resolve the source location if we need to emit a compile error. 
14407 try sema.resolveType(&block_scope, src, field_type_ref); 14408 14409 const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); 14410 assert(!gop.found_existing); 14411 gop.value_ptr.* = .{ 14412 .ty = try field_ty.copy(decl_arena_allocator), 14413 .abi_align = Value.initTag(.abi_align_default), 14414 .default_val = Value.initTag(.unreachable_value), 14415 .is_comptime = is_comptime, 14416 .offset = undefined, 14417 }; 14418 14419 if (has_align) { 14420 const align_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); 14421 extra_index += 1; 14422 // TODO: if we need to report an error here, use a source location 14423 // that points to this alignment expression rather than the struct. 14424 // But only resolve the source location if we need to emit a compile error. 14425 const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val; 14426 gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator); 14427 } 14428 if (has_default) { 14429 const default_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); 14430 extra_index += 1; 14431 const default_inst = sema.resolveInst(default_ref); 14432 // TODO: if we need to report an error here, use a source location 14433 // that points to this default value expression rather than the struct. 14434 // But only resolve the source location if we need to emit a compile error. 14435 const default_val = (try sema.resolveMaybeUndefVal(&block_scope, src, default_inst)) orelse 14436 return sema.failWithNeededComptime(&block_scope, src); 14437 gop.value_ptr.default_val = try default_val.copy(decl_arena_allocator); 14438 } 14439 } 14440} 14441 14442fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { 14443 const tracy = trace(@src()); 14444 defer tracy.end(); 14445 14446 const gpa = mod.gpa; 14447 const decl = union_obj.owner_decl; 14448 const zir = union_obj.namespace.file_scope.zir; 14449 const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; 14450 assert(extended.opcode == .union_decl); 14451 const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); 14452 var extra_index: usize = extended.operand; 14453 14454 const src: LazySrcLoc = .{ .node_offset = union_obj.node_offset }; 14455 extra_index += @boolToInt(small.has_src_node); 14456 14457 const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: { 14458 const ty_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); 14459 extra_index += 1; 14460 break :blk ty_ref; 14461 } else .none; 14462 14463 const body_len = if (small.has_body_len) blk: { 14464 const body_len = zir.extra[extra_index]; 14465 extra_index += 1; 14466 break :blk body_len; 14467 } else 0; 14468 14469 const fields_len = if (small.has_fields_len) blk: { 14470 const fields_len = zir.extra[extra_index]; 14471 extra_index += 1; 14472 break :blk fields_len; 14473 } else 0; 14474 14475 const decls_len = if (small.has_decls_len) decls_len: { 14476 const decls_len = zir.extra[extra_index]; 14477 extra_index += 1; 14478 break :decls_len decls_len; 14479 } else 0; 14480 14481 // Skip over decls. 
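    // Same trailing-data walk as for structs above, with a union-specific prefix: an
    // optional tag type ref comes before the body/fields/decls counts, and each field
    // entry may carry a type, an alignment, and a tag value, as indicated by its flag bits.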
14482 var decls_it = zir.declIteratorInner(extra_index, decls_len); 14483 while (decls_it.next()) |_| {} 14484 extra_index = decls_it.extra_index; 14485 14486 const body = zir.extra[extra_index..][0..body_len]; 14487 if (fields_len == 0) { 14488 assert(body.len == 0); 14489 return; 14490 } 14491 extra_index += body.len; 14492 14493 var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa); 14494 defer union_obj.owner_decl.value_arena.?.* = decl_arena.state; 14495 const decl_arena_allocator = decl_arena.allocator(); 14496 14497 var analysis_arena = std.heap.ArenaAllocator.init(gpa); 14498 defer analysis_arena.deinit(); 14499 14500 var sema: Sema = .{ 14501 .mod = mod, 14502 .gpa = gpa, 14503 .arena = analysis_arena.allocator(), 14504 .perm_arena = decl_arena_allocator, 14505 .code = zir, 14506 .owner_decl = decl, 14507 .func = null, 14508 .fn_ret_ty = Type.void, 14509 .owner_func = null, 14510 }; 14511 defer sema.deinit(); 14512 14513 var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); 14514 defer wip_captures.deinit(); 14515 14516 var block_scope: Block = .{ 14517 .parent = null, 14518 .sema = &sema, 14519 .src_decl = decl, 14520 .namespace = &union_obj.namespace, 14521 .wip_capture_scope = wip_captures.scope, 14522 .instructions = .{}, 14523 .inlining = null, 14524 .is_comptime = true, 14525 }; 14526 defer { 14527 assert(block_scope.instructions.items.len == 0); 14528 block_scope.params.deinit(gpa); 14529 } 14530 14531 if (body.len != 0) { 14532 _ = try sema.analyzeBody(&block_scope, body); 14533 } 14534 14535 try wip_captures.finalize(); 14536 14537 try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); 14538 14539 var int_tag_ty: Type = undefined; 14540 var enum_field_names: ?*Module.EnumNumbered.NameMap = null; 14541 var enum_value_map: ?*Module.EnumNumbered.ValueMap = null; 14542 if (tag_type_ref != .none) { 14543 const provided_ty = try sema.resolveType(&block_scope, src, tag_type_ref); 14544 if (small.auto_enum_tag) { 14545 // The provided type is an integer type and we must construct the enum tag type here. 14546 int_tag_ty = provided_ty; 14547 union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, fields_len, provided_ty); 14548 enum_field_names = &union_obj.tag_ty.castTag(.enum_numbered).?.data.fields; 14549 enum_value_map = &union_obj.tag_ty.castTag(.enum_numbered).?.data.values; 14550 } else { 14551 // The provided type is the enum tag type. 14552 union_obj.tag_ty = provided_ty; 14553 } 14554 } else { 14555 // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis 14556 // purposes, we still auto-generate an enum tag type the same way. That the union is 14557 // untagged is represented by the Type tag (union vs union_tagged). 
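        // Illustrative example: an untagged `union { a: u32, b: f32 }` still gets a
        // generated `enum { a, b }` here, populated from the field names below; it
        // exists only for semantic analysis.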
14558 union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, fields_len); 14559 enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields; 14560 } 14561 14562 const bits_per_field = 4; 14563 const fields_per_u32 = 32 / bits_per_field; 14564 const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; 14565 var bit_bag_index: usize = extra_index; 14566 extra_index += bit_bags_count; 14567 var cur_bit_bag: u32 = undefined; 14568 var field_i: u32 = 0; 14569 while (field_i < fields_len) : (field_i += 1) { 14570 if (field_i % fields_per_u32 == 0) { 14571 cur_bit_bag = zir.extra[bit_bag_index]; 14572 bit_bag_index += 1; 14573 } 14574 const has_type = @truncate(u1, cur_bit_bag) != 0; 14575 cur_bit_bag >>= 1; 14576 const has_align = @truncate(u1, cur_bit_bag) != 0; 14577 cur_bit_bag >>= 1; 14578 const has_tag = @truncate(u1, cur_bit_bag) != 0; 14579 cur_bit_bag >>= 1; 14580 const unused = @truncate(u1, cur_bit_bag) != 0; 14581 cur_bit_bag >>= 1; 14582 _ = unused; 14583 14584 const field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]); 14585 extra_index += 1; 14586 14587 const field_type_ref: Zir.Inst.Ref = if (has_type) blk: { 14588 const field_type_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); 14589 extra_index += 1; 14590 break :blk field_type_ref; 14591 } else .none; 14592 14593 const align_ref: Zir.Inst.Ref = if (has_align) blk: { 14594 const align_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); 14595 extra_index += 1; 14596 break :blk align_ref; 14597 } else .none; 14598 14599 const tag_ref: Zir.Inst.Ref = if (has_tag) blk: { 14600 const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); 14601 extra_index += 1; 14602 break :blk tag_ref; 14603 } else .none; 14604 14605 if (enum_value_map) |map| { 14606 const tag_src = src; // TODO better source location 14607 const coerced = try sema.coerce(&block_scope, int_tag_ty, tag_ref, tag_src); 14608 const val = try sema.resolveConstValue(&block_scope, tag_src, coerced); 14609 map.putAssumeCapacityContext(val, {}, .{ .ty = int_tag_ty }); 14610 } 14611 14612 // This string needs to outlive the ZIR code. 14613 const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); 14614 if (enum_field_names) |set| { 14615 set.putAssumeCapacity(field_name, {}); 14616 } 14617 14618 const field_ty: Type = if (!has_type) 14619 Type.void 14620 else if (field_type_ref == .none) 14621 Type.initTag(.noreturn) 14622 else 14623 // TODO: if we need to report an error here, use a source location 14624 // that points to this type expression rather than the union. 14625 // But only resolve the source location if we need to emit a compile error. 14626 try sema.resolveType(&block_scope, src, field_type_ref); 14627 14628 const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); 14629 assert(!gop.found_existing); 14630 gop.value_ptr.* = .{ 14631 .ty = try field_ty.copy(decl_arena_allocator), 14632 .abi_align = Value.initTag(.abi_align_default), 14633 }; 14634 14635 if (align_ref != .none) { 14636 // TODO: if we need to report an error here, use a source location 14637 // that points to this alignment expression rather than the struct. 14638 // But only resolve the source location if we need to emit a compile error. 
14639 const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val; 14640 gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator); 14641 } else { 14642 gop.value_ptr.abi_align = Value.initTag(.abi_align_default); 14643 } 14644 } 14645} 14646 14647fn generateUnionTagTypeNumbered( 14648 sema: *Sema, 14649 block: *Block, 14650 fields_len: u32, 14651 int_ty: Type, 14652) !Type { 14653 const mod = sema.mod; 14654 14655 var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); 14656 errdefer new_decl_arena.deinit(); 14657 const new_decl_arena_allocator = new_decl_arena.allocator(); 14658 14659 const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered); 14660 const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered); 14661 enum_ty_payload.* = .{ 14662 .base = .{ .tag = .enum_numbered }, 14663 .data = enum_obj, 14664 }; 14665 const enum_ty = Type.initPayload(&enum_ty_payload.base); 14666 const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); 14667 // TODO better type name 14668 const new_decl = try mod.createAnonymousDecl(block, .{ 14669 .ty = Type.type, 14670 .val = enum_val, 14671 }); 14672 new_decl.owns_tv = true; 14673 errdefer mod.abortAnonDecl(new_decl); 14674 14675 enum_obj.* = .{ 14676 .owner_decl = new_decl, 14677 .tag_ty = int_ty, 14678 .fields = .{}, 14679 .values = .{}, 14680 .node_offset = 0, 14681 }; 14682 // Here we pre-allocate the maps using the decl arena. 14683 try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); 14684 try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ .ty = int_ty }); 14685 try new_decl.finalizeNewArena(&new_decl_arena); 14686 return enum_ty; 14687} 14688 14689fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type { 14690 const mod = sema.mod; 14691 14692 var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); 14693 errdefer new_decl_arena.deinit(); 14694 const new_decl_arena_allocator = new_decl_arena.allocator(); 14695 14696 const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple); 14697 const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple); 14698 enum_ty_payload.* = .{ 14699 .base = .{ .tag = .enum_simple }, 14700 .data = enum_obj, 14701 }; 14702 const enum_ty = Type.initPayload(&enum_ty_payload.base); 14703 const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); 14704 // TODO better type name 14705 const new_decl = try mod.createAnonymousDecl(block, .{ 14706 .ty = Type.type, 14707 .val = enum_val, 14708 }); 14709 new_decl.owns_tv = true; 14710 errdefer mod.abortAnonDecl(new_decl); 14711 14712 enum_obj.* = .{ 14713 .owner_decl = new_decl, 14714 .fields = .{}, 14715 .node_offset = 0, 14716 }; 14717 // Here we pre-allocate the maps using the decl arena. 
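    // (The arena is handed to `new_decl.finalizeNewArena` below, so this map's memory
    // lives exactly as long as the anonymous decl.)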
14718 try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); 14719 try new_decl.finalizeNewArena(&new_decl_arena); 14720 return enum_ty; 14721} 14722 14723fn getBuiltin( 14724 sema: *Sema, 14725 block: *Block, 14726 src: LazySrcLoc, 14727 name: []const u8, 14728) CompileError!Air.Inst.Ref { 14729 const mod = sema.mod; 14730 const std_pkg = mod.main_pkg.table.get("std").?; 14731 const std_file = (mod.importPkg(std_pkg) catch unreachable).file; 14732 const opt_builtin_inst = try sema.namespaceLookupRef( 14733 block, 14734 src, 14735 std_file.root_decl.?.src_namespace, 14736 "builtin", 14737 ); 14738 const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst.?, src); 14739 const builtin_ty = try sema.analyzeAsType(block, src, builtin_inst); 14740 const opt_ty_inst = try sema.namespaceLookupRef( 14741 block, 14742 src, 14743 builtin_ty.getNamespace().?, 14744 name, 14745 ); 14746 return sema.analyzeLoad(block, src, opt_ty_inst.?, src); 14747} 14748 14749fn getBuiltinType( 14750 sema: *Sema, 14751 block: *Block, 14752 src: LazySrcLoc, 14753 name: []const u8, 14754) CompileError!Type { 14755 const ty_inst = try sema.getBuiltin(block, src, name); 14756 return sema.analyzeAsType(block, src, ty_inst); 14757} 14758 14759/// There is another implementation of this in `Type.onePossibleValue`. This one 14760/// in `Sema` is for calling during semantic analysis, and performs field resolution 14761/// to get the answer. The one in `Type` is for calling during codegen and asserts 14762/// that the types are already resolved. 14763fn typeHasOnePossibleValue( 14764 sema: *Sema, 14765 block: *Block, 14766 src: LazySrcLoc, 14767 ty: Type, 14768) CompileError!?Value { 14769 switch (ty.tag()) { 14770 .f16, 14771 .f32, 14772 .f64, 14773 .f128, 14774 .c_longdouble, 14775 .comptime_int, 14776 .comptime_float, 14777 .u1, 14778 .u8, 14779 .i8, 14780 .u16, 14781 .i16, 14782 .u32, 14783 .i32, 14784 .u64, 14785 .i64, 14786 .u128, 14787 .i128, 14788 .usize, 14789 .isize, 14790 .c_short, 14791 .c_ushort, 14792 .c_int, 14793 .c_uint, 14794 .c_long, 14795 .c_ulong, 14796 .c_longlong, 14797 .c_ulonglong, 14798 .bool, 14799 .type, 14800 .anyerror, 14801 .fn_noreturn_no_args, 14802 .fn_void_no_args, 14803 .fn_naked_noreturn_no_args, 14804 .fn_ccc_void_no_args, 14805 .function, 14806 .single_const_pointer_to_comptime_int, 14807 .array_sentinel, 14808 .array_u8_sentinel_0, 14809 .const_slice_u8, 14810 .const_slice, 14811 .mut_slice, 14812 .anyopaque, 14813 .optional, 14814 .optional_single_mut_pointer, 14815 .optional_single_const_pointer, 14816 .enum_literal, 14817 .anyerror_void_error_union, 14818 .error_union, 14819 .error_set, 14820 .error_set_single, 14821 .error_set_inferred, 14822 .error_set_merged, 14823 .@"opaque", 14824 .var_args_param, 14825 .manyptr_u8, 14826 .manyptr_const_u8, 14827 .atomic_order, 14828 .atomic_rmw_op, 14829 .calling_convention, 14830 .address_space, 14831 .float_mode, 14832 .reduce_op, 14833 .call_options, 14834 .prefetch_options, 14835 .export_options, 14836 .extern_options, 14837 .type_info, 14838 .@"anyframe", 14839 .anyframe_T, 14840 .many_const_pointer, 14841 .many_mut_pointer, 14842 .c_const_pointer, 14843 .c_mut_pointer, 14844 .single_const_pointer, 14845 .single_mut_pointer, 14846 .pointer, 14847 .bound_fn, 14848 => return null, 14849 14850 .@"struct" => { 14851 const resolved_ty = try sema.resolveTypeFields(block, src, ty); 14852 const s = resolved_ty.castTag(.@"struct").?.data; 14853 for (s.fields.values()) |value| { 14854 if ((try 
sema.typeHasOnePossibleValue(block, src, value.ty)) == null) { 14855 return null; 14856 } 14857 } 14858 return Value.initTag(.empty_struct_value); 14859 }, 14860 .enum_numbered => { 14861 const resolved_ty = try sema.resolveTypeFields(block, src, ty); 14862 const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; 14863 if (enum_obj.fields.count() == 1) { 14864 if (enum_obj.values.count() == 0) { 14865 return Value.zero; // auto-numbered 14866 } else { 14867 return enum_obj.values.keys()[0]; 14868 } 14869 } else { 14870 return null; 14871 } 14872 }, 14873 .enum_full => { 14874 const resolved_ty = try sema.resolveTypeFields(block, src, ty); 14875 const enum_obj = resolved_ty.castTag(.enum_full).?.data; 14876 if (enum_obj.fields.count() == 1) { 14877 if (enum_obj.values.count() == 0) { 14878 return Value.zero; // auto-numbered 14879 } else { 14880 return enum_obj.values.keys()[0]; 14881 } 14882 } else { 14883 return null; 14884 } 14885 }, 14886 .enum_simple => { 14887 const resolved_ty = try sema.resolveTypeFields(block, src, ty); 14888 const enum_simple = resolved_ty.castTag(.enum_simple).?.data; 14889 if (enum_simple.fields.count() == 1) { 14890 return Value.zero; 14891 } else { 14892 return null; 14893 } 14894 }, 14895 .enum_nonexhaustive => { 14896 const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; 14897 if (!tag_ty.hasCodeGenBits()) { 14898 return Value.zero; 14899 } else { 14900 return null; 14901 } 14902 }, 14903 .@"union" => { 14904 return null; // TODO 14905 }, 14906 .union_tagged => { 14907 return null; // TODO 14908 }, 14909 14910 .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), 14911 .void => return Value.void, 14912 .noreturn => return Value.initTag(.unreachable_value), 14913 .@"null" => return Value.@"null", 14914 .@"undefined" => return Value.initTag(.undef), 14915 14916 .int_unsigned, .int_signed => { 14917 if (ty.cast(Type.Payload.Bits).?.data == 0) { 14918 return Value.zero; 14919 } else { 14920 return null; 14921 } 14922 }, 14923 .vector, .array, .array_u8 => { 14924 if (ty.arrayLen() == 0) 14925 return Value.initTag(.empty_array); 14926 if ((try sema.typeHasOnePossibleValue(block, src, ty.elemType())) != null) { 14927 return Value.initTag(.the_only_possible_value); 14928 } 14929 return null; 14930 }, 14931 14932 .inferred_alloc_const => unreachable, 14933 .inferred_alloc_mut => unreachable, 14934 .generic_poison => return error.GenericPoison, 14935 } 14936} 14937 14938fn getAstTree(sema: *Sema, block: *Block) CompileError!*const std.zig.Ast { 14939 return block.namespace.file_scope.getTree(sema.gpa) catch |err| { 14940 log.err("unable to load AST to report compile error: {s}", .{@errorName(err)}); 14941 return error.AnalysisFail; 14942 }; 14943} 14944 14945fn enumFieldSrcLoc( 14946 decl: *Decl, 14947 tree: std.zig.Ast, 14948 node_offset: i32, 14949 field_index: usize, 14950) LazySrcLoc { 14951 @setCold(true); 14952 const enum_node = decl.relativeToNodeIndex(node_offset); 14953 const node_tags = tree.nodes.items(.tag); 14954 var buffer: [2]std.zig.Ast.Node.Index = undefined; 14955 const container_decl = switch (node_tags[enum_node]) { 14956 .container_decl, 14957 .container_decl_trailing, 14958 => tree.containerDecl(enum_node), 14959 14960 .container_decl_two, 14961 .container_decl_two_trailing, 14962 => tree.containerDeclTwo(&buffer, enum_node), 14963 14964 .container_decl_arg, 14965 .container_decl_arg_trailing, 14966 => tree.containerDeclArg(enum_node), 14967 14968 else => unreachable, 14969 }; 14970 var it_index: usize = 
0; 14971 for (container_decl.ast.members) |member_node| { 14972 switch (node_tags[member_node]) { 14973 .container_field_init, 14974 .container_field_align, 14975 .container_field, 14976 => { 14977 if (it_index == field_index) { 14978 return .{ .node_offset = decl.nodeIndexToRelative(member_node) }; 14979 } 14980 it_index += 1; 14981 }, 14982 14983 else => continue, 14984 } 14985 } else unreachable; 14986} 14987 14988/// Returns the type of the AIR instruction. 14989fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { 14990 return sema.getTmpAir().typeOf(inst); 14991} 14992 14993fn getTmpAir(sema: Sema) Air { 14994 return .{ 14995 .instructions = sema.air_instructions.slice(), 14996 .extra = sema.air_extra.items, 14997 .values = sema.air_values.items, 14998 }; 14999} 15000 15001pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { 15002 switch (ty.tag()) { 15003 .u1 => return .u1_type, 15004 .u8 => return .u8_type, 15005 .i8 => return .i8_type, 15006 .u16 => return .u16_type, 15007 .i16 => return .i16_type, 15008 .u32 => return .u32_type, 15009 .i32 => return .i32_type, 15010 .u64 => return .u64_type, 15011 .i64 => return .i64_type, 15012 .u128 => return .u128_type, 15013 .i128 => return .i128_type, 15014 .usize => return .usize_type, 15015 .isize => return .isize_type, 15016 .c_short => return .c_short_type, 15017 .c_ushort => return .c_ushort_type, 15018 .c_int => return .c_int_type, 15019 .c_uint => return .c_uint_type, 15020 .c_long => return .c_long_type, 15021 .c_ulong => return .c_ulong_type, 15022 .c_longlong => return .c_longlong_type, 15023 .c_ulonglong => return .c_ulonglong_type, 15024 .c_longdouble => return .c_longdouble_type, 15025 .f16 => return .f16_type, 15026 .f32 => return .f32_type, 15027 .f64 => return .f64_type, 15028 .f128 => return .f128_type, 15029 .anyopaque => return .anyopaque_type, 15030 .bool => return .bool_type, 15031 .void => return .void_type, 15032 .type => return .type_type, 15033 .anyerror => return .anyerror_type, 15034 .comptime_int => return .comptime_int_type, 15035 .comptime_float => return .comptime_float_type, 15036 .noreturn => return .noreturn_type, 15037 .@"anyframe" => return .anyframe_type, 15038 .@"null" => return .null_type, 15039 .@"undefined" => return .undefined_type, 15040 .enum_literal => return .enum_literal_type, 15041 .atomic_order => return .atomic_order_type, 15042 .atomic_rmw_op => return .atomic_rmw_op_type, 15043 .calling_convention => return .calling_convention_type, 15044 .address_space => return .address_space_type, 15045 .float_mode => return .float_mode_type, 15046 .reduce_op => return .reduce_op_type, 15047 .call_options => return .call_options_type, 15048 .prefetch_options => return .prefetch_options_type, 15049 .export_options => return .export_options_type, 15050 .extern_options => return .extern_options_type, 15051 .type_info => return .type_info_type, 15052 .manyptr_u8 => return .manyptr_u8_type, 15053 .manyptr_const_u8 => return .manyptr_const_u8_type, 15054 .fn_noreturn_no_args => return .fn_noreturn_no_args_type, 15055 .fn_void_no_args => return .fn_void_no_args_type, 15056 .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type, 15057 .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type, 15058 .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, 15059 .const_slice_u8 => return .const_slice_u8_type, 15060 .anyerror_void_error_union => return .anyerror_void_error_union_type, 15061 .generic_poison => return .generic_poison_type, 15062 else => {}, 15063 } 15064 
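    // None of the statically known type refs above matched; materialize the type as a
    // `const_ty` AIR instruction and return a ref to the new index.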
try sema.air_instructions.append(sema.gpa, .{ 15065 .tag = .const_ty, 15066 .data = .{ .ty = ty }, 15067 }); 15068 return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); 15069} 15070 15071fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { 15072 return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); 15073} 15074 15075fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { 15076 return sema.addConstant(ty, Value.initTag(.undef)); 15077} 15078 15079pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { 15080 const gpa = sema.gpa; 15081 const ty_inst = try sema.addType(ty); 15082 try sema.air_values.append(gpa, val); 15083 try sema.air_instructions.append(gpa, .{ 15084 .tag = .constant, 15085 .data = .{ .ty_pl = .{ 15086 .ty = ty_inst, 15087 .payload = @intCast(u32, sema.air_values.items.len - 1), 15088 } }, 15089 }); 15090 return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); 15091} 15092 15093pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { 15094 const fields = std.meta.fields(@TypeOf(extra)); 15095 try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len); 15096 return addExtraAssumeCapacity(sema, extra); 15097} 15098 15099pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { 15100 const fields = std.meta.fields(@TypeOf(extra)); 15101 const result = @intCast(u32, sema.air_extra.items.len); 15102 inline for (fields) |field| { 15103 sema.air_extra.appendAssumeCapacity(switch (field.field_type) { 15104 u32 => @field(extra, field.name), 15105 Air.Inst.Ref => @enumToInt(@field(extra, field.name)), 15106 i32 => @bitCast(u32, @field(extra, field.name)), 15107 else => @compileError("bad field type"), 15108 }); 15109 } 15110 return result; 15111} 15112 15113fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void { 15114 const coerced = @bitCast([]const u32, refs); 15115 sema.air_extra.appendSliceAssumeCapacity(coerced); 15116} 15117 15118fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index { 15119 const air_datas = sema.air_instructions.items(.data); 15120 const air_tags = sema.air_instructions.items(.tag); 15121 switch (air_tags[inst_index]) { 15122 .br => return air_datas[inst_index].br.block_inst, 15123 else => return null, 15124 } 15125} 15126 15127fn isComptimeKnown( 15128 sema: *Sema, 15129 block: *Block, 15130 src: LazySrcLoc, 15131 inst: Air.Inst.Ref, 15132) !bool { 15133 return (try sema.resolveMaybeUndefVal(block, src, inst)) != null; 15134} 15135 15136fn analyzeComptimeAlloc( 15137 sema: *Sema, 15138 block: *Block, 15139 var_type: Type, 15140 alignment: u32, 15141) CompileError!Air.Inst.Ref { 15142 const ptr_type = try Type.ptr(sema.arena, .{ 15143 .pointee_type = var_type, 15144 .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant), 15145 .@"align" = alignment, 15146 }); 15147 15148 var anon_decl = try block.startAnonDecl(); 15149 defer anon_decl.deinit(); 15150 15151 const align_val = if (alignment == 0) 15152 Value.@"null" 15153 else 15154 try Value.Tag.int_u64.create(anon_decl.arena(), alignment); 15155 15156 const decl = try anon_decl.finish( 15157 try var_type.copy(anon_decl.arena()), 15158 // There will be stores before the first load, but they may be to sub-elements or 15159 // sub-fields. So we need to initialize with undef to allow the mechanism to expand 15160 // into fields/elements and have those overridden with stored values. 
15161 Value.undef, 15162 ); 15163 decl.align_val = align_val; 15164 15165 try sema.mod.declareDeclDependency(sema.owner_decl, decl); 15166 return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{ 15167 .runtime_index = block.runtime_index, 15168 .decl = decl, 15169 })); 15170} 15171 15172/// The places where a user can specify an address space attribute. 15173pub const AddressSpaceContext = enum { 15174 /// A function is specified to be placed in a certain address space. 15175 function, 15176 15177 /// A (global) variable is specified to be placed in a certain address space. 15178 /// In contrast to .constant, these values (and thus the address space they will be 15179 /// placed in) are required to be mutable. 15180 variable, 15181 15182 /// A (global) constant value is specified to be placed in a certain address space. 15183 /// In contrast to .variable, values placed in this address space are not required to be mutable. 15184 constant, 15185 15186 /// A pointer is ascribed to point into a certain address space. 15187 pointer, 15188}; 15189 15190pub fn analyzeAddrspace( 15191 sema: *Sema, 15192 block: *Block, 15193 src: LazySrcLoc, 15194 zir_ref: Zir.Inst.Ref, 15195 ctx: AddressSpaceContext, 15196) !std.builtin.AddressSpace { 15197 const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref); 15198 const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace); 15199 const target = sema.mod.getTarget(); 15200 const arch = target.cpu.arch; 15201 15202 const supported = switch (address_space) { 15203 .generic => true, 15204 .gs, .fs, .ss => (arch == .i386 or arch == .x86_64) and ctx == .pointer, 15205 }; 15206 15207 if (!supported) { 15208 // TODO error messages could be made more elaborate here 15209 const entity = switch (ctx) { 15210 .function => "functions", 15211 .variable => "mutable values", 15212 .constant => "constant values", 15213 .pointer => "pointers", 15214 }; 15215 15216 return sema.fail( 15217 block, 15218 src, 15219 "{s} with address space '{s}' are not supported on {s}", 15220 .{ entity, @tagName(address_space), arch.genericName() }, 15221 ); 15222 } 15223 15224 return address_space; 15225} 15226 15227/// Asserts the value is a pointer and dereferences it. 15228/// Returns `null` if the pointer contents cannot be loaded at comptime. 15229fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { 15230 const target = sema.mod.getTarget(); 15231 const load_ty = ptr_ty.childType(); 15232 const parent = sema.beginComptimePtrLoad(block, src, ptr_val) catch |err| switch (err) { 15233 error.RuntimeLoad => return null, 15234 else => |e| return e, 15235 }; 15236 // We have a Value that lines up in virtual memory exactly with what we want to load. 15237 // If the Type is in-memory coercible to `load_ty`, it may be returned without modifications. 15238 const coerce_in_mem_ok = 15239 coerceInMemoryAllowed(load_ty, parent.ty, false, target) == .ok or 15240 coerceInMemoryAllowed(parent.ty, load_ty, false, target) == .ok; 15241 if (coerce_in_mem_ok) { 15242 if (parent.is_mutable) { 15243 // The decl whose value we are obtaining here may be overwritten with 15244 // a different value upon further semantic analysis, which would 15245 // invalidate this memory. So we must copy here.
15246 return try parent.val.copy(sema.arena); 15247 } 15248 return parent.val; 15249 } 15250 15251 // The type is not in-memory coercible, so it must be bitcasted according 15252 // to the pointer type we are performing the load through. 15253 15254 // TODO emit a compile error if the types are not allowed to be bitcasted 15255 15256 if (parent.ty.abiSize(target) >= load_ty.abiSize(target)) { 15257 // The Type it is stored as in the compiler has an ABI size greater than or equal to 15258 // the ABI size of `load_ty`. We may perform the bitcast based on 15259 // `parent.val` alone (more efficient). 15260 return try sema.bitCastVal(block, src, parent.val, parent.ty, load_ty); 15261 } 15262 15263 // The Type it is stored as in the compiler has an ABI size less than the ABI size 15264 // of `load_ty`. The bitcast must be performed based on the `parent.root_val` 15265 // and reinterpreted starting at `parent.byte_offset`. 15266 return sema.fail(block, src, "TODO: implement bitcast with index offset", .{}); 15267} 15268 15269/// Used to convert a u64 value to a usize value, emitting a compile error if the number 15270/// is too big to fit. 15271fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError!usize { 15272 return std.math.cast(usize, int) catch |err| switch (err) { 15273 error.Overflow => return sema.fail(block, src, "expression produces integer value {d} which is too big for this compiler implementation to handle", .{int}), 15274 }; 15275} 15276