From c06cf6e95cc0ff1419c34d13d6eef3debc28ede7 Mon Sep 17 00:00:00 2001 From: Techarix <19954306+Techatrix@users.noreply.github.com> Date: Tue, 14 Feb 2023 22:13:07 +0100 Subject: [PATCH 1/3] add zig code for generating Zir --- src/stage2/AstGen.zig | 12814 +++++++++++++++++++++++++++++++++++++ src/stage2/BuiltinFn.zig | 1007 +++ src/stage2/LICENSE | 21 + src/stage2/Module.zig | 960 +++ src/stage2/Zir.zig | 3855 +++++++++++ 5 files changed, 18657 insertions(+) create mode 100644 src/stage2/AstGen.zig create mode 100644 src/stage2/BuiltinFn.zig create mode 100644 src/stage2/LICENSE create mode 100644 src/stage2/Module.zig create mode 100644 src/stage2/Zir.zig diff --git a/src/stage2/AstGen.zig b/src/stage2/AstGen.zig new file mode 100644 index 0000000..e6e2d70 --- /dev/null +++ b/src/stage2/AstGen.zig @@ -0,0 +1,12814 @@ +//! Ingests an AST and produces ZIR code. +const AstGen = @This(); + +const std = @import("std"); +const Ast = std.zig.Ast; +const mem = std.mem; +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const ArrayListUnmanaged = std.ArrayListUnmanaged; +const StringIndexAdapter = std.hash_map.StringIndexAdapter; +const StringIndexContext = std.hash_map.StringIndexContext; + +const isPrimitive = std.zig.primitives.isPrimitive; + +const Zir = @import("Zir.zig"); +const refToIndex = Zir.refToIndex; +const indexToRef = Zir.indexToRef; +const trace = @import("../tracy.zig").trace; +const BuiltinFn = @import("BuiltinFn.zig"); + +gpa: Allocator, +tree: *const Ast, +instructions: std.MultiArrayList(Zir.Inst) = .{}, +extra: ArrayListUnmanaged(u32) = .{}, +string_bytes: ArrayListUnmanaged(u8) = .{}, +/// Tracks the current byte offset within the source file. +/// Used to populate line deltas in the ZIR. AstGen maintains +/// this "cursor" throughout the entire AST lowering process in order +/// to avoid starting over the line/column scan for every declaration, which +/// would be O(N^2). +source_offset: u32 = 0, +/// Tracks the corresponding line of `source_offset`. +/// This value is absolute. +source_line: u32 = 0, +/// Tracks the corresponding column of `source_offset`. +/// This value is absolute. +source_column: u32 = 0, +/// Used for temporary allocations; freed after AstGen is complete. +/// The resulting ZIR code has no references to anything in this arena. +arena: Allocator, +string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{}, +compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{}, +/// The topmost block of the current function. +fn_block: ?*GenZir = null, +fn_var_args: bool = false, +/// Maps string table indexes to the first `@import` ZIR instruction +/// that uses this string as the operand. +imports: std.AutoArrayHashMapUnmanaged(u32, Ast.TokenIndex) = .{}, +/// Used for temporary storage when building payloads. +scratch: std.ArrayListUnmanaged(u32) = .{}, +/// Whenever a `ref` instruction is needed, it is created and saved in this +/// table instead of being immediately appended to the current block body. +/// Then, when the instruction is being added to the parent block (typically from +/// setBlockBody), if it has a ref_table entry, then the ref instruction is added +/// there. This makes sure two properties are upheld: +/// 1. All pointers to the same locals return the same address. This is required +/// to be compliant with the language specification. +/// 2. `ref` instructions will dominate their uses. This is a required property +/// of ZIR. 
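+/// For example, if `&x` is lowered twice for the same local `x`, both lowerings
+/// find the entry saved here and reuse the single `ref` instruction, so they
+/// yield the same address.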
+/// The key is the ref operand; the value is the ref instruction. +ref_table: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}, + +const InnerError = error{ OutOfMemory, AnalysisFail }; + +fn addExtra(astgen: *AstGen, extra: anytype) Allocator.Error!u32 { + const fields = std.meta.fields(@TypeOf(extra)); + try astgen.extra.ensureUnusedCapacity(astgen.gpa, fields.len); + return addExtraAssumeCapacity(astgen, extra); +} + +fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 { + const fields = std.meta.fields(@TypeOf(extra)); + const result = @intCast(u32, astgen.extra.items.len); + astgen.extra.items.len += fields.len; + setExtra(astgen, result, extra); + return result; +} + +fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void { + const fields = std.meta.fields(@TypeOf(extra)); + var i = index; + inline for (fields) |field| { + astgen.extra.items[i] = switch (field.type) { + u32 => @field(extra, field.name), + Zir.Inst.Ref => @enumToInt(@field(extra, field.name)), + i32 => @bitCast(u32, @field(extra, field.name)), + Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)), + Zir.Inst.BuiltinCall.Flags => @bitCast(u32, @field(extra, field.name)), + Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)), + Zir.Inst.FuncFancy.Bits => @bitCast(u32, @field(extra, field.name)), + else => @compileError("bad field type"), + }; + i += 1; + } +} + +fn reserveExtra(astgen: *AstGen, size: usize) Allocator.Error!u32 { + const result = @intCast(u32, astgen.extra.items.len); + try astgen.extra.resize(astgen.gpa, result + size); + return result; +} + +fn appendRefs(astgen: *AstGen, refs: []const Zir.Inst.Ref) !void { + const coerced = @ptrCast([]const u32, refs); + return astgen.extra.appendSlice(astgen.gpa, coerced); +} + +fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void { + const coerced = @ptrCast([]const u32, refs); + astgen.extra.appendSliceAssumeCapacity(coerced); +} + +pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { + var arena = std.heap.ArenaAllocator.init(gpa); + defer arena.deinit(); + + var astgen: AstGen = .{ + .gpa = gpa, + .arena = arena.allocator(), + .tree = &tree, + }; + defer astgen.deinit(gpa); + + // String table indexes 0, 1, 2 are reserved for special meaning. + try astgen.string_bytes.appendSlice(gpa, &[_]u8{ 0, 0, 0 }); + + // We expect at least as many ZIR instructions and extra data items + // as AST nodes. + try astgen.instructions.ensureTotalCapacity(gpa, tree.nodes.len); + + // First few indexes of extra are reserved and set at the end. + const reserved_count = @typeInfo(Zir.ExtraIndex).Enum.fields.len; + try astgen.extra.ensureTotalCapacity(gpa, tree.nodes.len + reserved_count); + astgen.extra.items.len += reserved_count; + + var top_scope: Scope.Top = .{}; + + var gz_instructions: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; + var gen_scope: GenZir = .{ + .force_comptime = true, + .parent = &top_scope.base, + .anon_name_strategy = .parent, + .decl_node_index = 0, + .decl_line = 0, + .astgen = &astgen, + .instructions = &gz_instructions, + .instructions_top = 0, + }; + defer gz_instructions.deinit(gpa); + + // The AST -> ZIR lowering process assumes an AST that does not have any + // parse errors. + if (tree.errors.len == 0) { + if (AstGen.structDeclInner( + &gen_scope, + &gen_scope.base, + 0, + tree.containerDeclRoot(), + .Auto, + 0, + )) |struct_decl_ref| { + assert(refToIndex(struct_decl_ref).? 
== 0); + } else |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, // Handled via compile_errors below. + } + } else { + try lowerAstErrors(&astgen); + } + + const err_index = @enumToInt(Zir.ExtraIndex.compile_errors); + if (astgen.compile_errors.items.len == 0) { + astgen.extra.items[err_index] = 0; + } else { + try astgen.extra.ensureUnusedCapacity(gpa, 1 + astgen.compile_errors.items.len * + @typeInfo(Zir.Inst.CompileErrors.Item).Struct.fields.len); + + astgen.extra.items[err_index] = astgen.addExtraAssumeCapacity(Zir.Inst.CompileErrors{ + .items_len = @intCast(u32, astgen.compile_errors.items.len), + }); + + for (astgen.compile_errors.items) |item| { + _ = astgen.addExtraAssumeCapacity(item); + } + } + + const imports_index = @enumToInt(Zir.ExtraIndex.imports); + if (astgen.imports.count() == 0) { + astgen.extra.items[imports_index] = 0; + } else { + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Imports).Struct.fields.len + + astgen.imports.count() * @typeInfo(Zir.Inst.Imports.Item).Struct.fields.len); + + astgen.extra.items[imports_index] = astgen.addExtraAssumeCapacity(Zir.Inst.Imports{ + .imports_len = @intCast(u32, astgen.imports.count()), + }); + + var it = astgen.imports.iterator(); + while (it.next()) |entry| { + _ = astgen.addExtraAssumeCapacity(Zir.Inst.Imports.Item{ + .name = entry.key_ptr.*, + .token = entry.value_ptr.*, + }); + } + } + + return Zir{ + .instructions = astgen.instructions.toOwnedSlice(), + .string_bytes = try astgen.string_bytes.toOwnedSlice(gpa), + .extra = try astgen.extra.toOwnedSlice(gpa), + }; +} + +fn deinit(astgen: *AstGen, gpa: Allocator) void { + astgen.instructions.deinit(gpa); + astgen.extra.deinit(gpa); + astgen.string_table.deinit(gpa); + astgen.string_bytes.deinit(gpa); + astgen.compile_errors.deinit(gpa); + astgen.imports.deinit(gpa); + astgen.scratch.deinit(gpa); + astgen.ref_table.deinit(gpa); +} + +const ResultInfo = struct { + /// The semantics requested for the result location + rl: Loc, + + /// The "operator" consuming the result location + ctx: Context = .none, + + /// Turns a `coerced_ty` back into a `ty`. Should be called at branch points + /// such as if and switch expressions. + fn br(ri: ResultInfo) ResultInfo { + return switch (ri.rl) { + .coerced_ty => |ty| .{ + .rl = .{ .ty = ty }, + .ctx = ri.ctx, + }, + else => ri, + }; + } + + fn zirTag(ri: ResultInfo) Zir.Inst.Tag { + switch (ri.rl) { + .ty => return switch (ri.ctx) { + .shift_op => .as_shift_operand, + else => .as_node, + }, + else => unreachable, + } + } + + const Loc = union(enum) { + /// The expression is the right-hand side of assignment to `_`. Only the side-effects of the + /// expression should be generated. The result instruction from the expression must + /// be ignored. + discard, + /// The expression has an inferred type, and it will be evaluated as an rvalue. + none, + /// The expression must generate a pointer rather than a value. For example, the left hand side + /// of an assignment uses this kind of result location. + ref, + /// The expression will be coerced into this type, but it will be evaluated as an rvalue. + ty: Zir.Inst.Ref, + /// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion, + /// so no `as` instruction needs to be emitted. + coerced_ty: Zir.Inst.Ref, + /// The expression must store its result into this typed pointer. The result instruction + /// from the expression must be ignored. 
+ ptr: PtrResultLoc, + /// The expression must store its result into this allocation, which has an inferred type. + /// The result instruction from the expression must be ignored. + /// Always an instruction with tag `alloc_inferred`. + inferred_ptr: Zir.Inst.Ref, + /// There is a pointer for the expression to store its result into, however, its type + /// is inferred based on peer type resolution for a `Zir.Inst.Block`. + /// The result instruction from the expression must be ignored. + block_ptr: *GenZir, + + const PtrResultLoc = struct { + inst: Zir.Inst.Ref, + src_node: ?Ast.Node.Index = null, + }; + + const Strategy = struct { + elide_store_to_block_ptr_instructions: bool, + tag: Tag, + + const Tag = enum { + /// Both branches will use break_void; result location is used to communicate the + /// result instruction. + break_void, + /// Use break statements to pass the block result value, and call rvalue() at + /// the end depending on rl. Also elide the store_to_block_ptr instructions + /// depending on rl. + break_operand, + }; + }; + + fn strategy(rl: Loc, block_scope: *GenZir) Strategy { + switch (rl) { + // In this branch there will not be any store_to_block_ptr instructions. + .none, .ty, .coerced_ty, .ref => return .{ + .tag = .break_operand, + .elide_store_to_block_ptr_instructions = false, + }, + .discard => return .{ + .tag = .break_void, + .elide_store_to_block_ptr_instructions = false, + }, + // The pointer got passed through to the sub-expressions, so we will use + // break_void here. + // In this branch there will not be any store_to_block_ptr instructions. + .ptr => return .{ + .tag = .break_void, + .elide_store_to_block_ptr_instructions = false, + }, + .inferred_ptr, .block_ptr => { + if (block_scope.rvalue_rl_count == block_scope.break_count) { + // Neither prong of the if consumed the result location, so we can + // use break instructions to create an rvalue. + return .{ + .tag = .break_operand, + .elide_store_to_block_ptr_instructions = true, + }; + } else { + // Allow the store_to_block_ptr instructions to remain so that + // semantic analysis can turn them into bitcasts. + return .{ + .tag = .break_void, + .elide_store_to_block_ptr_instructions = false, + }; + } + }, + } + } + }; + + const Context = enum { + /// The expression is the operand to a return expression. + @"return", + /// The expression is the input to an error-handling operator (if-else, try, or catch). + error_handling_expr, + /// The expression is the right-hand side of a shift operation. + shift_op, + /// The expression is an argument in a function call. + fn_arg, + /// The expression is the right-hand side of an initializer for a `const` variable + const_init, + /// The expression is the right-hand side of an assignment expression. + assignment, + /// No specific operator in particular. 
+ none, + }; +}; + +const align_ri: ResultInfo = .{ .rl = .{ .ty = .u29_type } }; +const coerced_align_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .u29_type } }; +const bool_ri: ResultInfo = .{ .rl = .{ .ty = .bool_type } }; +const type_ri: ResultInfo = .{ .rl = .{ .ty = .type_type } }; +const coerced_type_ri: ResultInfo = .{ .rl = .{ .coerced_ty = .type_type } }; + +fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const prev_force_comptime = gz.force_comptime; + gz.force_comptime = true; + defer gz.force_comptime = prev_force_comptime; + + return expr(gz, scope, coerced_type_ri, type_node); +} + +fn reachableTypeExpr( + gz: *GenZir, + scope: *Scope, + type_node: Ast.Node.Index, + reachable_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const prev_force_comptime = gz.force_comptime; + gz.force_comptime = true; + defer gz.force_comptime = prev_force_comptime; + + return reachableExpr(gz, scope, coerced_type_ri, type_node, reachable_node); +} + +/// Same as `expr` but fails with a compile error if the result type is `noreturn`. +fn reachableExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + reachable_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + return reachableExprComptime(gz, scope, ri, node, reachable_node, false); +} + +fn reachableExprComptime( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + reachable_node: Ast.Node.Index, + force_comptime: bool, +) InnerError!Zir.Inst.Ref { + const prev_force_comptime = gz.force_comptime; + gz.force_comptime = prev_force_comptime or force_comptime; + defer gz.force_comptime = prev_force_comptime; + + const result_inst = try expr(gz, scope, ri, node); + if (gz.refIsNoReturn(result_inst)) { + try gz.astgen.appendErrorNodeNotes(reachable_node, "unreachable code", .{}, &[_]u32{ + try gz.astgen.errNoteNode(node, "control flow is diverted here", .{}), + }); + } + return result_inst; +} + +fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + switch (node_tags[node]) { + .root => unreachable, + .@"usingnamespace" => unreachable, + .test_decl => unreachable, + .global_var_decl => unreachable, + .local_var_decl => unreachable, + .simple_var_decl => unreachable, + .aligned_var_decl => unreachable, + .switch_case => unreachable, + .switch_case_inline => unreachable, + .switch_case_one => unreachable, + .switch_case_inline_one => unreachable, + .container_field_init => unreachable, + .container_field_align => unreachable, + .container_field => unreachable, + .asm_output => unreachable, + .asm_input => unreachable, + + .assign, + .assign_bit_and, + .assign_bit_or, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + .add, + .add_wrap, + .add_sat, + .sub, + .sub_wrap, + .sub_sat, + .mul, + .mul_wrap, + .mul_sat, + .div, + .mod, + .bit_and, + .bit_or, + .shl, + .shl_sat, + .shr, + .bit_xor, + .bang_equal, + .equal_equal, + .greater_than, + .greater_or_equal, + .less_than, + .less_or_equal, + .array_cat, + .array_mult, + .bool_and, + .bool_or, + .@"asm", + .asm_simple, + .string_literal, + .number_literal, + .call, + .call_comma, + .async_call, + 
.async_call_comma, + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .unreachable_literal, + .@"return", + .@"if", + .if_simple, + .@"while", + .while_simple, + .while_cont, + .bool_not, + .address_of, + .optional_type, + .block, + .block_semicolon, + .block_two, + .block_two_semicolon, + .@"break", + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + .array_type, + .array_type_sentinel, + .enum_literal, + .multiline_string_literal, + .char_literal, + .@"defer", + .@"errdefer", + .@"catch", + .error_union, + .merge_error_sets, + .switch_range, + .for_range, + .@"await", + .bit_not, + .negation, + .negation_wrap, + .@"resume", + .@"try", + .slice, + .slice_open, + .slice_sentinel, + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + .struct_init_one, + .struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + .@"switch", + .switch_comma, + .@"for", + .for_simple, + .@"suspend", + .@"continue", + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + .fn_decl, + .anyframe_type, + .anyframe_literal, + .error_set_decl, + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .@"comptime", + .@"nosuspend", + .error_value, + => return astgen.failNode(node, "invalid left-hand side to assignment", .{}), + + .builtin_call, + .builtin_call_comma, + .builtin_call_two, + .builtin_call_two_comma, + => { + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + // If the builtin is an invalid name, we don't cause an error here; instead + // let it pass, and the error will be "invalid builtin function" later. + if (BuiltinFn.list.get(builtin_name)) |info| { + if (!info.allows_lvalue) { + return astgen.failNode(node, "invalid left-hand side to assignment", .{}); + } + } + }, + + // These can be assigned to. + .unwrap_optional, + .deref, + .field_access, + .array_access, + .identifier, + .grouped_expression, + .@"orelse", + => {}, + } + return expr(gz, scope, .{ .rl = .ref }, node); +} + +/// Turn Zig AST into untyped ZIR instructions. +/// When `rl` is discard, ptr, inferred_ptr, or inferred_ptr, the +/// result instruction can be used to inspect whether it is isNoReturn() but that is it, +/// it must otherwise not be used. +fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + + const prev_anon_name_strategy = gz.anon_name_strategy; + defer gz.anon_name_strategy = prev_anon_name_strategy; + if (!nodeUsesAnonNameStrategy(tree, node)) { + gz.anon_name_strategy = .anon; + } + + switch (node_tags[node]) { + .root => unreachable, // Top-level declaration. + .@"usingnamespace" => unreachable, // Top-level declaration. + .test_decl => unreachable, // Top-level declaration. 
+ .container_field_init => unreachable, // Top-level declaration. + .container_field_align => unreachable, // Top-level declaration. + .container_field => unreachable, // Top-level declaration. + .fn_decl => unreachable, // Top-level declaration. + + .global_var_decl => unreachable, // Handled in `blockExpr`. + .local_var_decl => unreachable, // Handled in `blockExpr`. + .simple_var_decl => unreachable, // Handled in `blockExpr`. + .aligned_var_decl => unreachable, // Handled in `blockExpr`. + .@"defer" => unreachable, // Handled in `blockExpr`. + .@"errdefer" => unreachable, // Handled in `blockExpr`. + + .switch_case => unreachable, // Handled in `switchExpr`. + .switch_case_inline => unreachable, // Handled in `switchExpr`. + .switch_case_one => unreachable, // Handled in `switchExpr`. + .switch_case_inline_one => unreachable, // Handled in `switchExpr`. + .switch_range => unreachable, // Handled in `switchExpr`. + + .asm_output => unreachable, // Handled in `asmExpr`. + .asm_input => unreachable, // Handled in `asmExpr`. + + .for_range => unreachable, // Handled in `forExpr`. + + .assign => { + try assign(gz, scope, node); + return rvalue(gz, ri, .void_value, node); + }, + + .assign_shl => { + try assignShift(gz, scope, node, .shl); + return rvalue(gz, ri, .void_value, node); + }, + .assign_shl_sat => { + try assignShiftSat(gz, scope, node); + return rvalue(gz, ri, .void_value, node); + }, + .assign_shr => { + try assignShift(gz, scope, node, .shr); + return rvalue(gz, ri, .void_value, node); + }, + + .assign_bit_and => { + try assignOp(gz, scope, node, .bit_and); + return rvalue(gz, ri, .void_value, node); + }, + .assign_bit_or => { + try assignOp(gz, scope, node, .bit_or); + return rvalue(gz, ri, .void_value, node); + }, + .assign_bit_xor => { + try assignOp(gz, scope, node, .xor); + return rvalue(gz, ri, .void_value, node); + }, + .assign_div => { + try assignOp(gz, scope, node, .div); + return rvalue(gz, ri, .void_value, node); + }, + .assign_sub => { + try assignOp(gz, scope, node, .sub); + return rvalue(gz, ri, .void_value, node); + }, + .assign_sub_wrap => { + try assignOp(gz, scope, node, .subwrap); + return rvalue(gz, ri, .void_value, node); + }, + .assign_sub_sat => { + try assignOp(gz, scope, node, .sub_sat); + return rvalue(gz, ri, .void_value, node); + }, + .assign_mod => { + try assignOp(gz, scope, node, .mod_rem); + return rvalue(gz, ri, .void_value, node); + }, + .assign_add => { + try assignOp(gz, scope, node, .add); + return rvalue(gz, ri, .void_value, node); + }, + .assign_add_wrap => { + try assignOp(gz, scope, node, .addwrap); + return rvalue(gz, ri, .void_value, node); + }, + .assign_add_sat => { + try assignOp(gz, scope, node, .add_sat); + return rvalue(gz, ri, .void_value, node); + }, + .assign_mul => { + try assignOp(gz, scope, node, .mul); + return rvalue(gz, ri, .void_value, node); + }, + .assign_mul_wrap => { + try assignOp(gz, scope, node, .mulwrap); + return rvalue(gz, ri, .void_value, node); + }, + .assign_mul_sat => { + try assignOp(gz, scope, node, .mul_sat); + return rvalue(gz, ri, .void_value, node); + }, + + // zig fmt: off + .shl => return shiftOp(gz, scope, ri, node, node_datas[node].lhs, node_datas[node].rhs, .shl), + .shr => return shiftOp(gz, scope, ri, node, node_datas[node].lhs, node_datas[node].rhs, .shr), + + .add => return simpleBinOp(gz, scope, ri, node, .add), + .add_wrap => return simpleBinOp(gz, scope, ri, node, .addwrap), + .add_sat => return simpleBinOp(gz, scope, ri, node, .add_sat), + .sub => return simpleBinOp(gz, scope, ri, node, 
.sub), + .sub_wrap => return simpleBinOp(gz, scope, ri, node, .subwrap), + .sub_sat => return simpleBinOp(gz, scope, ri, node, .sub_sat), + .mul => return simpleBinOp(gz, scope, ri, node, .mul), + .mul_wrap => return simpleBinOp(gz, scope, ri, node, .mulwrap), + .mul_sat => return simpleBinOp(gz, scope, ri, node, .mul_sat), + .div => return simpleBinOp(gz, scope, ri, node, .div), + .mod => return simpleBinOp(gz, scope, ri, node, .mod_rem), + .shl_sat => return simpleBinOp(gz, scope, ri, node, .shl_sat), + + .bit_and => return simpleBinOp(gz, scope, ri, node, .bit_and), + .bit_or => return simpleBinOp(gz, scope, ri, node, .bit_or), + .bit_xor => return simpleBinOp(gz, scope, ri, node, .xor), + .bang_equal => return simpleBinOp(gz, scope, ri, node, .cmp_neq), + .equal_equal => return simpleBinOp(gz, scope, ri, node, .cmp_eq), + .greater_than => return simpleBinOp(gz, scope, ri, node, .cmp_gt), + .greater_or_equal => return simpleBinOp(gz, scope, ri, node, .cmp_gte), + .less_than => return simpleBinOp(gz, scope, ri, node, .cmp_lt), + .less_or_equal => return simpleBinOp(gz, scope, ri, node, .cmp_lte), + .array_cat => return simpleBinOp(gz, scope, ri, node, .array_cat), + + .array_mult => { + const result = try gz.addPlNode(.array_mul, node, Zir.Inst.Bin{ + .lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs), + .rhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs), + }); + return rvalue(gz, ri, result, node); + }, + + .error_union => return simpleBinOp(gz, scope, ri, node, .error_union_type), + .merge_error_sets => return simpleBinOp(gz, scope, ri, node, .merge_error_sets), + + .bool_and => return boolBinOp(gz, scope, ri, node, .bool_br_and), + .bool_or => return boolBinOp(gz, scope, ri, node, .bool_br_or), + + .bool_not => return simpleUnOp(gz, scope, ri, node, bool_ri, node_datas[node].lhs, .bool_not), + .bit_not => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .bit_not), + + .negation => return negation(gz, scope, ri, node), + .negation_wrap => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, node_datas[node].lhs, .negate_wrap), + + .identifier => return identifier(gz, scope, ri, node), + + .asm_simple, + .@"asm", + => return asmExpr(gz, scope, ri, node, tree.fullAsm(node).?), + + .string_literal => return stringLiteral(gz, ri, node), + .multiline_string_literal => return multilineStringLiteral(gz, ri, node), + + .number_literal => return numberLiteral(gz, ri, node, node, .positive), + // zig fmt: on + + .builtin_call_two, .builtin_call_two_comma => { + if (node_datas[node].lhs == 0) { + const params = [_]Ast.Node.Index{}; + return builtinCall(gz, scope, ri, node, ¶ms); + } else if (node_datas[node].rhs == 0) { + const params = [_]Ast.Node.Index{node_datas[node].lhs}; + return builtinCall(gz, scope, ri, node, ¶ms); + } else { + const params = [_]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs }; + return builtinCall(gz, scope, ri, node, ¶ms); + } + }, + .builtin_call, .builtin_call_comma => { + const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; + return builtinCall(gz, scope, ri, node, params); + }, + + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + => { + var buf: [1]Ast.Node.Index = undefined; + return callExpr(gz, scope, ri, node, tree.fullCall(&buf, node).?); + }, + + .unreachable_literal => { + try emitDbgNode(gz, node); + _ = try gz.addAsIndex(.{ + .tag = 
.@"unreachable", + .data = .{ .@"unreachable" = .{ + .force_comptime = gz.force_comptime, + .src_node = gz.nodeIndexToRelative(node), + } }, + }); + return Zir.Inst.Ref.unreachable_value; + }, + .@"return" => return ret(gz, scope, node), + .field_access => return fieldAccess(gz, scope, ri, node), + + .if_simple, + .@"if", + => return ifExpr(gz, scope, ri.br(), node, tree.fullIf(node).?), + + .while_simple, + .while_cont, + .@"while", + => return whileExpr(gz, scope, ri.br(), node, tree.fullWhile(node).?, false), + + .for_simple, .@"for" => return forExpr(gz, scope, ri.br(), node, tree.fullFor(node).?, false), + + .slice_open => { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + maybeAdvanceSourceCursorToMainToken(gz, node); + const line = gz.astgen.source_line - gz.decl_line; + const column = gz.astgen.source_column; + + const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, node_datas[node].rhs); + try emitDbgStmt(gz, line, column); + const result = try gz.addPlNode(.slice_start, node, Zir.Inst.SliceStart{ + .lhs = lhs, + .start = start, + }); + return rvalue(gz, ri, result, node); + }, + .slice => { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + maybeAdvanceSourceCursorToMainToken(gz, node); + const line = gz.astgen.source_line - gz.decl_line; + const column = gz.astgen.source_column; + + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.Slice); + const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.start); + const end = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end); + try emitDbgStmt(gz, line, column); + const result = try gz.addPlNode(.slice_end, node, Zir.Inst.SliceEnd{ + .lhs = lhs, + .start = start, + .end = end, + }); + return rvalue(gz, ri, result, node); + }, + .slice_sentinel => { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + maybeAdvanceSourceCursorToMainToken(gz, node); + const line = gz.astgen.source_line - gz.decl_line; + const column = gz.astgen.source_column; + + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SliceSentinel); + const start = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.start); + const end = if (extra.end != 0) try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, extra.end) else .none; + const sentinel = try expr(gz, scope, .{ .rl = .none }, extra.sentinel); + try emitDbgStmt(gz, line, column); + const result = try gz.addPlNode(.slice_sentinel, node, Zir.Inst.SliceSentinel{ + .lhs = lhs, + .start = start, + .end = end, + .sentinel = sentinel, + }); + return rvalue(gz, ri, result, node); + }, + + .deref => { + const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); + _ = try gz.addUnNode(.validate_deref, lhs, node); + switch (ri.rl) { + .ref => return lhs, + else => { + const result = try gz.addUnNode(.load, lhs, node); + return rvalue(gz, ri, result, node); + }, + } + }, + .address_of => { + const result = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + return rvalue(gz, ri, result, node); + }, + .optional_type => { + const operand = try typeExpr(gz, scope, node_datas[node].lhs); + const result = try gz.addUnNode(.optional_type, operand, node); + return rvalue(gz, ri, result, node); + }, + .unwrap_optional => switch (ri.rl) { + .ref => { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + maybeAdvanceSourceCursorToMainToken(gz, node); + const line = gz.astgen.source_line - 
gz.decl_line; + const column = gz.astgen.source_column; + try emitDbgStmt(gz, line, column); + + return gz.addUnNode(.optional_payload_safe_ptr, lhs, node); + }, + else => { + const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); + + maybeAdvanceSourceCursorToMainToken(gz, node); + const line = gz.astgen.source_line - gz.decl_line; + const column = gz.astgen.source_column; + try emitDbgStmt(gz, line, column); + + return rvalue(gz, ri, try gz.addUnNode(.optional_payload_safe, lhs, node), node); + }, + }, + .block_two, .block_two_semicolon => { + const statements = [2]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs }; + if (node_datas[node].lhs == 0) { + return blockExpr(gz, scope, ri, node, statements[0..0]); + } else if (node_datas[node].rhs == 0) { + return blockExpr(gz, scope, ri, node, statements[0..1]); + } else { + return blockExpr(gz, scope, ri, node, statements[0..2]); + } + }, + .block, .block_semicolon => { + const statements = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; + return blockExpr(gz, scope, ri, node, statements); + }, + .enum_literal => return simpleStrTok(gz, ri, main_tokens[node], node, .enum_literal), + .error_value => return simpleStrTok(gz, ri, node_datas[node].rhs, node, .error_value), + // TODO restore this when implementing https://github.com/ziglang/zig/issues/6025 + // .anyframe_literal => return rvalue(gz, ri, .anyframe_type, node), + .anyframe_literal => { + const result = try gz.addUnNode(.anyframe_type, .void_type, node); + return rvalue(gz, ri, result, node); + }, + .anyframe_type => { + const return_type = try typeExpr(gz, scope, node_datas[node].rhs); + const result = try gz.addUnNode(.anyframe_type, return_type, node); + return rvalue(gz, ri, result, node); + }, + .@"catch" => { + const catch_token = main_tokens[node]; + const payload_token: ?Ast.TokenIndex = if (token_tags[catch_token + 1] == .pipe) + catch_token + 2 + else + null; + switch (ri.rl) { + .ref => return orelseCatchExpr( + gz, + scope, + ri, + node, + node_datas[node].lhs, + .is_non_err_ptr, + .err_union_payload_unsafe_ptr, + .err_union_code_ptr, + node_datas[node].rhs, + payload_token, + ), + else => return orelseCatchExpr( + gz, + scope, + ri, + node, + node_datas[node].lhs, + .is_non_err, + .err_union_payload_unsafe, + .err_union_code, + node_datas[node].rhs, + payload_token, + ), + } + }, + .@"orelse" => switch (ri.rl) { + .ref => return orelseCatchExpr( + gz, + scope, + ri, + node, + node_datas[node].lhs, + .is_non_null_ptr, + .optional_payload_unsafe_ptr, + undefined, + node_datas[node].rhs, + null, + ), + else => return orelseCatchExpr( + gz, + scope, + ri, + node, + node_datas[node].lhs, + .is_non_null, + .optional_payload_unsafe, + undefined, + node_datas[node].rhs, + null, + ), + }, + + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + => return ptrType(gz, scope, ri, node, tree.fullPtrType(node).?), + + .container_decl, + .container_decl_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .container_decl_two, + .container_decl_two_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + => { + var buf: [2]Ast.Node.Index = undefined; + return containerDecl(gz, scope, ri, node, tree.fullContainerDecl(&buf, node).?); + }, + + .@"break" => return breakExpr(gz, scope, node), + .@"continue" => return continueExpr(gz, scope, node), + .grouped_expression => return expr(gz, scope, ri, 
node_datas[node].lhs), + .array_type => return arrayType(gz, scope, ri, node), + .array_type_sentinel => return arrayTypeSentinel(gz, scope, ri, node), + .char_literal => return charLiteral(gz, ri, node), + .error_set_decl => return errorSetDecl(gz, ri, node), + .array_access => return arrayAccess(gz, scope, ri, node), + .@"comptime" => return comptimeExprAst(gz, scope, ri, node), + .@"switch", .switch_comma => return switchExpr(gz, scope, ri.br(), node), + + .@"nosuspend" => return nosuspendExpr(gz, scope, ri, node), + .@"suspend" => return suspendExpr(gz, scope, node), + .@"await" => return awaitExpr(gz, scope, ri, node), + .@"resume" => return resumeExpr(gz, scope, ri, node), + + .@"try" => return tryExpr(gz, scope, ri, node, node_datas[node].lhs), + + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + => { + var buf: [2]Ast.Node.Index = undefined; + return arrayInitExpr(gz, scope, ri, node, tree.fullArrayInit(&buf, node).?); + }, + + .struct_init_one, + .struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + => { + var buf: [2]Ast.Node.Index = undefined; + return structInitExpr(gz, scope, ri, node, tree.fullStructInit(&buf, node).?); + }, + + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + => { + var buf: [1]Ast.Node.Index = undefined; + return fnProtoExpr(gz, scope, ri, node, tree.fullFnProto(&buf, node).?); + }, + } +} + +fn nosuspendExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].lhs; + assert(body_node != 0); + if (gz.nosuspend_node != 0) { + try astgen.appendErrorNodeNotes(node, "redundant nosuspend block", .{}, &[_]u32{ + try astgen.errNoteNode(gz.nosuspend_node, "other nosuspend block here", .{}), + }); + } + gz.nosuspend_node = node; + defer gz.nosuspend_node = 0; + return expr(gz, scope, ri, body_node); +} + +fn suspendExpr( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].lhs; + + if (gz.nosuspend_node != 0) { + return astgen.failNodeNotes(node, "suspend inside nosuspend block", .{}, &[_]u32{ + try astgen.errNoteNode(gz.nosuspend_node, "nosuspend block here", .{}), + }); + } + if (gz.suspend_node != 0) { + return astgen.failNodeNotes(node, "cannot suspend inside suspend block", .{}, &[_]u32{ + try astgen.errNoteNode(gz.suspend_node, "other suspend block here", .{}), + }); + } + assert(body_node != 0); + + const suspend_inst = try gz.makeBlockInst(.suspend_block, node); + try gz.instructions.append(gpa, suspend_inst); + + var suspend_scope = gz.makeSubBlock(scope); + suspend_scope.suspend_node = node; + defer suspend_scope.unstack(); + + const body_result = try expr(&suspend_scope, &suspend_scope.base, .{ .rl = .none }, body_node); + if (!gz.refIsNoReturn(body_result)) { + _ = try suspend_scope.addBreak(.break_inline, suspend_inst, .void_value); + } + try suspend_scope.setBlockBody(suspend_inst); + + return indexToRef(suspend_inst); +} + +fn awaitExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) 
InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const rhs_node = node_datas[node].lhs; + + if (gz.suspend_node != 0) { + return astgen.failNodeNotes(node, "cannot await inside suspend block", .{}, &[_]u32{ + try astgen.errNoteNode(gz.suspend_node, "suspend block here", .{}), + }); + } + const operand = try expr(gz, scope, .{ .rl = .none }, rhs_node); + const result = if (gz.nosuspend_node != 0) + try gz.addExtendedPayload(.await_nosuspend, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }) + else + try gz.addUnNode(.@"await", operand, node); + + return rvalue(gz, ri, result, node); +} + +fn resumeExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const rhs_node = node_datas[node].lhs; + const operand = try expr(gz, scope, .{ .rl = .none }, rhs_node); + const result = try gz.addUnNode(.@"resume", operand, node); + return rvalue(gz, ri, result, node); +} + +fn fnProtoExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + fn_proto: Ast.full.FnProto, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + if (fn_proto.name_token) |some| { + return astgen.failTok(some, "function type cannot have a name", .{}); + } + + const is_extern = blk: { + const maybe_extern_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_extern_token] == .keyword_extern; + }; + assert(!is_extern); + + var block_scope = gz.makeSubBlock(scope); + defer block_scope.unstack(); + + const block_inst = try gz.makeBlockInst(.block_inline, node); + + var noalias_bits: u32 = 0; + const is_var_args = is_var_args: { + var param_type_i: usize = 0; + var it = fn_proto.iterate(tree); + while (it.next()) |param| : (param_type_i += 1) { + const is_comptime = if (param.comptime_noalias) |token| switch (token_tags[token]) { + .keyword_noalias => is_comptime: { + noalias_bits |= @as(u32, 1) << (std.math.cast(u5, param_type_i) orelse + return astgen.failTok(token, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); + break :is_comptime false; + }, + .keyword_comptime => true, + else => false, + } else false; + + const is_anytype = if (param.anytype_ellipsis3) |token| blk: { + switch (token_tags[token]) { + .keyword_anytype => break :blk true, + .ellipsis3 => break :is_var_args true, + else => unreachable, + } + } else false; + + const param_name: u32 = if (param.name_token) |name_token| blk: { + if (mem.eql(u8, "_", tree.tokenSlice(name_token))) + break :blk 0; + + break :blk try astgen.identAsString(name_token); + } else 0; + + if (is_anytype) { + const name_token = param.name_token orelse param.anytype_ellipsis3.?; + + const tag: Zir.Inst.Tag = if (is_comptime) + .param_anytype_comptime + else + .param_anytype; + _ = try block_scope.addStrTok(tag, param_name, name_token); + } else { + const param_type_node = param.type_expr; + assert(param_type_node != 0); + var param_gz = block_scope.makeSubBlock(scope); + defer param_gz.unstack(); + const param_type = try expr(¶m_gz, scope, coerced_type_ri, param_type_node); + const param_inst_expected = @intCast(u32, astgen.instructions.len + 1); + _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, 
param_type_node); + const main_tokens = tree.nodes.items(.main_token); + const name_token = param.name_token orelse main_tokens[param_type_node]; + const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param; + const param_inst = try block_scope.addParam(¶m_gz, tag, name_token, param_name, param.first_doc_comment); + assert(param_inst_expected == param_inst); + } + } + break :is_var_args false; + }; + + const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { + break :inst try expr(&block_scope, scope, align_ri, fn_proto.ast.align_expr); + }; + + if (fn_proto.ast.addrspace_expr != 0) { + return astgen.failNode(fn_proto.ast.addrspace_expr, "addrspace not allowed on function prototypes", .{}); + } + + if (fn_proto.ast.section_expr != 0) { + return astgen.failNode(fn_proto.ast.section_expr, "linksection not allowed on function prototypes", .{}); + } + + const cc: Zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0) + try expr( + &block_scope, + scope, + .{ .rl = .{ .ty = .calling_convention_type } }, + fn_proto.ast.callconv_expr, + ) + else + Zir.Inst.Ref.none; + + const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1; + const is_inferred_error = token_tags[maybe_bang] == .bang; + if (is_inferred_error) { + return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{}); + } + const ret_ty = try expr(&block_scope, scope, coerced_type_ri, fn_proto.ast.return_type); + + const result = try block_scope.addFunc(.{ + .src_node = fn_proto.ast.proto_node, + + .cc_ref = cc, + .cc_gz = null, + .align_ref = align_ref, + .align_gz = null, + .ret_ref = ret_ty, + .ret_gz = null, + .section_ref = .none, + .section_gz = null, + .addrspace_ref = .none, + .addrspace_gz = null, + + .param_block = block_inst, + .body_gz = null, + .lib_name = 0, + .is_var_args = is_var_args, + .is_inferred_error = false, + .is_test = false, + .is_extern = false, + .is_noinline = false, + .noalias_bits = noalias_bits, + }); + + _ = try block_scope.addBreak(.break_inline, block_inst, result); + try block_scope.setBlockBody(block_inst); + try gz.instructions.append(astgen.gpa, block_inst); + + return rvalue(gz, ri, indexToRef(block_inst), fn_proto.ast.proto_node); +} + +fn arrayInitExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + array_init: Ast.full.ArrayInit, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + + assert(array_init.ast.elements.len != 0); // Otherwise it would be struct init. + + const types: struct { + array: Zir.Inst.Ref, + elem: Zir.Inst.Ref, + } = inst: { + if (array_init.ast.type_expr == 0) break :inst .{ + .array = .none, + .elem = .none, + }; + + infer: { + const array_type: Ast.full.ArrayType = tree.fullArrayType(array_init.ast.type_expr) orelse break :infer; + // This intentionally does not support `@"_"` syntax. 
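+            // For example, `[_]u8{ 1, 2, 3 }` uses a bare `_` length; the length is
+            // then inferred from the number of elements in the initializer below.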
+ if (node_tags[array_type.ast.elem_count] == .identifier and + mem.eql(u8, tree.tokenSlice(main_tokens[array_type.ast.elem_count]), "_")) + { + const len_inst = try gz.addInt(array_init.ast.elements.len); + const elem_type = try typeExpr(gz, scope, array_type.ast.elem_type); + if (array_type.ast.sentinel == 0) { + const array_type_inst = try gz.addPlNode(.array_type, array_init.ast.type_expr, Zir.Inst.Bin{ + .lhs = len_inst, + .rhs = elem_type, + }); + break :inst .{ + .array = array_type_inst, + .elem = elem_type, + }; + } else { + const sentinel = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, array_type.ast.sentinel); + const array_type_inst = try gz.addPlNode( + .array_type_sentinel, + array_init.ast.type_expr, + Zir.Inst.ArrayTypeSentinel{ + .len = len_inst, + .elem_type = elem_type, + .sentinel = sentinel, + }, + ); + break :inst .{ + .array = array_type_inst, + .elem = elem_type, + }; + } + } + } + const array_type_inst = try typeExpr(gz, scope, array_init.ast.type_expr); + _ = try gz.addPlNode(.validate_array_init_ty, node, Zir.Inst.ArrayInit{ + .ty = array_type_inst, + .init_count = @intCast(u32, array_init.ast.elements.len), + }); + break :inst .{ + .array = array_type_inst, + .elem = .none, + }; + }; + + switch (ri.rl) { + .discard => { + // TODO elements should still be coerced if type is provided + for (array_init.ast.elements) |elem_init| { + _ = try expr(gz, scope, .{ .rl = .discard }, elem_init); + } + return Zir.Inst.Ref.void_value; + }, + .ref => { + const tag: Zir.Inst.Tag = if (types.array != .none) .array_init_ref else .array_init_anon_ref; + return arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag); + }, + .none => { + const tag: Zir.Inst.Tag = if (types.array != .none) .array_init else .array_init_anon; + return arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag); + }, + .ty, .coerced_ty => { + const tag: Zir.Inst.Tag = if (types.array != .none) .array_init else .array_init_anon; + const result = try arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag); + return rvalue(gz, ri, result, node); + }, + .ptr => |ptr_res| { + return arrayInitExprRlPtr(gz, scope, ri, node, ptr_res.inst, array_init.ast.elements, types.array); + }, + .inferred_ptr => |ptr_inst| { + if (types.array == .none) { + // We treat this case differently so that we don't get a crash when + // analyzing array_base_ptr against an alloc_inferred_mut. + // See corresponding logic in structInitExpr. + const result = try arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon); + return rvalue(gz, ri, result, node); + } else { + return arrayInitExprRlPtr(gz, scope, ri, node, ptr_inst, array_init.ast.elements, types.array); + } + }, + .block_ptr => |block_gz| { + // This condition is here for the same reason as the above condition in `inferred_ptr`. + // See corresponding logic in structInitExpr. 
+ if (types.array == .none and astgen.isInferred(block_gz.rl_ptr)) { + const result = try arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon); + return rvalue(gz, ri, result, node); + } + return arrayInitExprRlPtr(gz, scope, ri, node, block_gz.rl_ptr, array_init.ast.elements, types.array); + }, + } +} + +fn arrayInitExprRlNone( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + elements: []const Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + + const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ + .operands_len = @intCast(u32, elements.len), + }); + var extra_index = try reserveExtra(astgen, elements.len); + + for (elements) |elem_init| { + const elem_ref = try expr(gz, scope, .{ .rl = .none }, elem_init); + astgen.extra.items[extra_index] = @enumToInt(elem_ref); + extra_index += 1; + } + return try gz.addPlNodePayloadIndex(tag, node, payload_index); +} + +fn arrayInitExprInner( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + elements: []const Ast.Node.Index, + array_ty_inst: Zir.Inst.Ref, + elem_ty: Zir.Inst.Ref, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + + const len = elements.len + @boolToInt(array_ty_inst != .none); + const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{ + .operands_len = @intCast(u32, len), + }); + var extra_index = try reserveExtra(astgen, len); + if (array_ty_inst != .none) { + astgen.extra.items[extra_index] = @enumToInt(array_ty_inst); + extra_index += 1; + } + + for (elements, 0..) |elem_init, i| { + const ri = if (elem_ty != .none) + ResultInfo{ .rl = .{ .coerced_ty = elem_ty } } + else if (array_ty_inst != .none and nodeMayNeedMemoryLocation(astgen.tree, elem_init, true)) ri: { + const ty_expr = try gz.add(.{ + .tag = .elem_type_index, + .data = .{ .bin = .{ + .lhs = array_ty_inst, + .rhs = @intToEnum(Zir.Inst.Ref, i), + } }, + }); + break :ri ResultInfo{ .rl = .{ .coerced_ty = ty_expr } }; + } else ResultInfo{ .rl = .{ .none = {} } }; + + const elem_ref = try expr(gz, scope, ri, elem_init); + astgen.extra.items[extra_index] = @enumToInt(elem_ref); + extra_index += 1; + } + + return try gz.addPlNodePayloadIndex(tag, node, payload_index); +} + +fn arrayInitExprRlPtr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + result_ptr: Zir.Inst.Ref, + elements: []const Ast.Node.Index, + array_ty: Zir.Inst.Ref, +) InnerError!Zir.Inst.Ref { + if (array_ty == .none) { + const base_ptr = try gz.addUnNode(.array_base_ptr, result_ptr, node); + return arrayInitExprRlPtrInner(gz, scope, node, base_ptr, elements); + } + + var as_scope = try gz.makeCoercionScope(scope, array_ty, result_ptr, node); + defer as_scope.unstack(); + + const result = try arrayInitExprRlPtrInner(&as_scope, scope, node, as_scope.rl_ptr, elements); + return as_scope.finishCoercion(gz, ri, node, result, array_ty); +} + +fn arrayInitExprRlPtrInner( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + result_ptr: Zir.Inst.Ref, + elements: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + + const payload_index = try addExtra(astgen, Zir.Inst.Block{ + .body_len = @intCast(u32, elements.len), + }); + var extra_index = try reserveExtra(astgen, elements.len); + + for (elements, 0..) 
|elem_init, i| { + const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{ + .ptr = result_ptr, + .index = @intCast(u32, i), + }); + astgen.extra.items[extra_index] = refToIndex(elem_ptr).?; + extra_index += 1; + _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ .inst = elem_ptr } } }, elem_init); + } + + const tag: Zir.Inst.Tag = if (gz.force_comptime) + .validate_array_init_comptime + else + .validate_array_init; + + _ = try gz.addPlNodePayloadIndex(tag, node, payload_index); + return .void_value; +} + +fn structInitExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + struct_init: Ast.full.StructInit, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + + if (struct_init.ast.type_expr == 0) { + if (struct_init.ast.fields.len == 0) { + return rvalue(gz, ri, .empty_struct, node); + } + } else array: { + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const array_type: Ast.full.ArrayType = tree.fullArrayType(struct_init.ast.type_expr) orelse { + if (struct_init.ast.fields.len == 0) { + const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); + const result = try gz.addUnNode(.struct_init_empty, ty_inst, node); + return rvalue(gz, ri, result, node); + } + break :array; + }; + const is_inferred_array_len = node_tags[array_type.ast.elem_count] == .identifier and + // This intentionally does not support `@"_"` syntax. + mem.eql(u8, tree.tokenSlice(main_tokens[array_type.ast.elem_count]), "_"); + if (struct_init.ast.fields.len == 0) { + if (is_inferred_array_len) { + const elem_type = try typeExpr(gz, scope, array_type.ast.elem_type); + const array_type_inst = if (array_type.ast.sentinel == 0) blk: { + break :blk try gz.addPlNode(.array_type, struct_init.ast.type_expr, Zir.Inst.Bin{ + .lhs = .zero_usize, + .rhs = elem_type, + }); + } else blk: { + const sentinel = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, array_type.ast.sentinel); + break :blk try gz.addPlNode( + .array_type_sentinel, + struct_init.ast.type_expr, + Zir.Inst.ArrayTypeSentinel{ + .len = .zero_usize, + .elem_type = elem_type, + .sentinel = sentinel, + }, + ); + }; + const result = try gz.addUnNode(.struct_init_empty, array_type_inst, node); + return rvalue(gz, ri, result, node); + } + const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); + const result = try gz.addUnNode(.struct_init_empty, ty_inst, node); + return rvalue(gz, ri, result, node); + } else { + return astgen.failNode( + struct_init.ast.type_expr, + "initializing array with struct syntax", + .{}, + ); + } + } + + switch (ri.rl) { + .discard => { + if (struct_init.ast.type_expr != 0) { + const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); + _ = try gz.addUnNode(.validate_struct_init_ty, ty_inst, node); + _ = try structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init); + } else { + _ = try structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon); + } + return Zir.Inst.Ref.void_value; + }, + .ref => { + if (struct_init.ast.type_expr != 0) { + const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); + _ = try gz.addUnNode(.validate_struct_init_ty, ty_inst, node); + return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init_ref); + } else { + return structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon_ref); + } + }, + .none => { + if (struct_init.ast.type_expr != 0) { + const ty_inst = try 
typeExpr(gz, scope, struct_init.ast.type_expr); + _ = try gz.addUnNode(.validate_struct_init_ty, ty_inst, node); + return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init); + } else { + return structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon); + } + }, + .ty, .coerced_ty => |ty_inst| { + if (struct_init.ast.type_expr == 0) { + const result = try structInitExprRlNone(gz, scope, node, struct_init, ty_inst, .struct_init_anon); + return rvalue(gz, ri, result, node); + } + const inner_ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); + _ = try gz.addUnNode(.validate_struct_init_ty, inner_ty_inst, node); + const result = try structInitExprRlTy(gz, scope, node, struct_init, inner_ty_inst, .struct_init); + return rvalue(gz, ri, result, node); + }, + .ptr => |ptr_res| return structInitExprRlPtr(gz, scope, ri, node, struct_init, ptr_res.inst), + .inferred_ptr => |ptr_inst| { + if (struct_init.ast.type_expr == 0) { + // We treat this case differently so that we don't get a crash when + // analyzing field_base_ptr against an alloc_inferred_mut. + // See corresponding logic in arrayInitExpr. + const result = try structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon); + return rvalue(gz, ri, result, node); + } else { + return structInitExprRlPtr(gz, scope, ri, node, struct_init, ptr_inst); + } + }, + .block_ptr => |block_gz| { + // This condition is here for the same reason as the above condition in `inferred_ptr`. + // See corresponding logic in arrayInitExpr. + if (struct_init.ast.type_expr == 0 and astgen.isInferred(block_gz.rl_ptr)) { + const result = try structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon); + return rvalue(gz, ri, result, node); + } + + return structInitExprRlPtr(gz, scope, ri, node, struct_init, block_gz.rl_ptr); + }, + } +} + +fn structInitExprRlNone( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + struct_init: Ast.full.StructInit, + ty_inst: Zir.Inst.Ref, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + + const payload_index = try addExtra(astgen, Zir.Inst.StructInitAnon{ + .fields_len = @intCast(u32, struct_init.ast.fields.len), + }); + const field_size = @typeInfo(Zir.Inst.StructInitAnon.Item).Struct.fields.len; + var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); + + for (struct_init.ast.fields) |field_init| { + const name_token = tree.firstToken(field_init) - 2; + const str_index = try astgen.identAsString(name_token); + const sub_ri: ResultInfo = if (ty_inst != .none) + ResultInfo{ .rl = .{ .ty = try gz.addPlNode(.field_type, field_init, Zir.Inst.FieldType{ + .container_type = ty_inst, + .name_start = str_index, + }) } } + else .{ .rl = .none }; + setExtra(astgen, extra_index, Zir.Inst.StructInitAnon.Item{ + .field_name = str_index, + .init = try expr(gz, scope, sub_ri, field_init), + }); + extra_index += field_size; + } + + return try gz.addPlNodePayloadIndex(tag, node, payload_index); +} + +fn structInitExprRlPtr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + struct_init: Ast.full.StructInit, + result_ptr: Zir.Inst.Ref, +) InnerError!Zir.Inst.Ref { + if (struct_init.ast.type_expr == 0) { + const base_ptr = try gz.addUnNode(.field_base_ptr, result_ptr, node); + return structInitExprRlPtrInner(gz, scope, node, struct_init, base_ptr); + } + const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr); + _ = try 
gz.addUnNode(.validate_struct_init_ty, ty_inst, node); + + var as_scope = try gz.makeCoercionScope(scope, ty_inst, result_ptr, node); + defer as_scope.unstack(); + + const result = try structInitExprRlPtrInner(&as_scope, scope, node, struct_init, as_scope.rl_ptr); + return as_scope.finishCoercion(gz, ri, node, result, ty_inst); +} + +fn structInitExprRlPtrInner( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + struct_init: Ast.full.StructInit, + result_ptr: Zir.Inst.Ref, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + + const payload_index = try addExtra(astgen, Zir.Inst.Block{ + .body_len = @intCast(u32, struct_init.ast.fields.len), + }); + var extra_index = try reserveExtra(astgen, struct_init.ast.fields.len); + + for (struct_init.ast.fields) |field_init| { + const name_token = tree.firstToken(field_init) - 2; + const str_index = try astgen.identAsString(name_token); + const field_ptr = try gz.addPlNode(.field_ptr_init, field_init, Zir.Inst.Field{ + .lhs = result_ptr, + .field_name_start = str_index, + }); + astgen.extra.items[extra_index] = refToIndex(field_ptr).?; + extra_index += 1; + _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ .inst = field_ptr } } }, field_init); + } + + const tag: Zir.Inst.Tag = if (gz.force_comptime) + .validate_struct_init_comptime + else + .validate_struct_init; + + _ = try gz.addPlNodePayloadIndex(tag, node, payload_index); + return Zir.Inst.Ref.void_value; +} + +fn structInitExprRlTy( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + struct_init: Ast.full.StructInit, + ty_inst: Zir.Inst.Ref, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + + const payload_index = try addExtra(astgen, Zir.Inst.StructInit{ + .fields_len = @intCast(u32, struct_init.ast.fields.len), + }); + const field_size = @typeInfo(Zir.Inst.StructInit.Item).Struct.fields.len; + var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size); + + for (struct_init.ast.fields) |field_init| { + const name_token = tree.firstToken(field_init) - 2; + const str_index = try astgen.identAsString(name_token); + const field_ty_inst = try gz.addPlNode(.field_type, field_init, Zir.Inst.FieldType{ + .container_type = ty_inst, + .name_start = str_index, + }); + setExtra(astgen, extra_index, Zir.Inst.StructInit.Item{ + .field_type = refToIndex(field_ty_inst).?, + .init = try expr(gz, scope, .{ .rl = .{ .ty = field_ty_inst } }, field_init), + }); + extra_index += field_size; + } + + return try gz.addPlNodePayloadIndex(tag, node, payload_index); +} + +/// This calls expr in a comptime scope, and is intended to be called as a helper function. +/// The one that corresponds to `comptime` expression syntax is `comptimeExprAst`. +fn comptimeExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const prev_force_comptime = gz.force_comptime; + gz.force_comptime = true; + defer gz.force_comptime = prev_force_comptime; + + return expr(gz, scope, ri, node); +} + +/// This one is for an actual `comptime` syntax, and will emit a compile error if +/// the scope already has `force_comptime=true`. +/// See `comptimeExpr` for the helper function for calling expr in a comptime scope. 
+fn comptimeExprAst( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + if (gz.force_comptime) { + return astgen.failNode(node, "redundant comptime keyword in already comptime scope", .{}); + } + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].lhs; + gz.force_comptime = true; + const result = try expr(gz, scope, ri, body_node); + gz.force_comptime = false; + return result; +} + +/// Restore the error return trace index. Performs the restore only if the result is a non-error or +/// if the result location is a non-error-handling expression. +fn restoreErrRetIndex( + gz: *GenZir, + bt: GenZir.BranchTarget, + ri: ResultInfo, + node: Ast.Node.Index, + result: Zir.Inst.Ref, +) !void { + const op = switch (nodeMayEvalToError(gz.astgen.tree, node)) { + .always => return, // never restore/pop + .never => .none, // always restore/pop + .maybe => switch (ri.ctx) { + .error_handling_expr, .@"return", .fn_arg, .const_init => switch (ri.rl) { + .ptr => |ptr_res| try gz.addUnNode(.load, ptr_res.inst, node), + .inferred_ptr => |ptr| try gz.addUnNode(.load, ptr, node), + .block_ptr => |block_scope| if (block_scope.rvalue_rl_count != block_scope.break_count) b: { + // The result location may have been used by this expression, in which case + // the operand is not the result and we need to load the rl ptr. + switch (gz.astgen.instructions.items(.tag)[Zir.refToIndex(block_scope.rl_ptr).?]) { + .alloc_inferred, .alloc_inferred_mut => { + // This is a terrible workaround for Sema's inability to load from a .alloc_inferred ptr + // before its type has been resolved. The operand we use here instead is not guaranteed + // to be valid, and when it's not, we will pop error traces prematurely. + // + // TODO: Update this to do a proper load from the rl_ptr, once Sema can support it. + break :b result; + }, + else => break :b try gz.addUnNode(.load, block_scope.rl_ptr, node), + } + } else result, + else => result, + }, + else => .none, // always restore/pop + }, + }; + _ = try gz.addRestoreErrRetIndex(bt, .{ .if_non_error = op }); +} + +fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const break_label = node_datas[node].lhs; + const rhs = node_datas[node].rhs; + + // Look for the label in the scope. + var scope = parent_scope; + while (true) { + switch (scope.tag) { + .gen_zir => { + const block_gz = scope.cast(GenZir).?; + + if (block_gz.cur_defer_node != 0) { + // We are breaking out of a `defer` block. 
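+ // For example, `blk: { defer break :blk; }` reaches this error path:
+ // control flow is not allowed to escape the body of a defer expression.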
+ return astgen.failNodeNotes(node, "cannot break out of defer expression", .{}, &.{ + try astgen.errNoteNode( + block_gz.cur_defer_node, + "defer expression here", + .{}, + ), + }); + } + + const block_inst = blk: { + if (break_label != 0) { + if (block_gz.label) |*label| { + if (try astgen.tokenIdentEql(label.token, break_label)) { + label.used = true; + break :blk label.block_inst; + } + } + } else if (block_gz.break_block != 0) { + break :blk block_gz.break_block; + } + // If not the target, start over with the parent + scope = block_gz.parent; + continue; + }; + // If we made it here, this block is the target of the break expr + + const break_tag: Zir.Inst.Tag = if (block_gz.is_inline or block_gz.force_comptime) + .break_inline + else + .@"break"; + + block_gz.break_count += 1; + if (rhs == 0) { + _ = try rvalue(parent_gz, block_gz.break_result_info, .void_value, node); + + try genDefers(parent_gz, scope, parent_scope, .normal_only); + + // As our last action before the break, "pop" the error trace if needed + if (!block_gz.force_comptime) + _ = try parent_gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always); + + _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); + return Zir.Inst.Ref.unreachable_value; + } + + const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node); + const search_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + + try genDefers(parent_gz, scope, parent_scope, .normal_only); + + // As our last action before the break, "pop" the error trace if needed + if (!block_gz.force_comptime) + try restoreErrRetIndex(parent_gz, .{ .block = block_inst }, block_gz.break_result_info, rhs, operand); + + switch (block_gz.break_result_info.rl) { + .block_ptr => { + const br = try parent_gz.addBreakWithSrcNode(break_tag, block_inst, operand, rhs); + try block_gz.labeled_breaks.append(astgen.gpa, .{ .br = br, .search = search_index }); + }, + .ptr => { + // In this case we don't have any mechanism to intercept it; + // we assume the result location is written, and we break with void. + _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); + }, + .discard => { + _ = try parent_gz.addBreak(break_tag, block_inst, .void_value); + }, + else => { + _ = try parent_gz.addBreakWithSrcNode(break_tag, block_inst, operand, rhs); + }, + } + return Zir.Inst.Ref.unreachable_value; + }, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .namespace, .enum_namespace => break, + .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, + .top => unreachable, + } + } + if (break_label != 0) { + const label_name = try astgen.identifierTokenString(break_label); + return astgen.failTok(break_label, "label not found: '{s}'", .{label_name}); + } else { + return astgen.failNode(node, "break expression outside loop", .{}); + } +} + +fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const break_label = node_datas[node].lhs; + + // Look for the label in the scope. 
+ var scope = parent_scope; + while (true) { + switch (scope.tag) { + .gen_zir => { + const gen_zir = scope.cast(GenZir).?; + + if (gen_zir.cur_defer_node != 0) { + return astgen.failNodeNotes(node, "cannot continue out of defer expression", .{}, &.{ + try astgen.errNoteNode( + gen_zir.cur_defer_node, + "defer expression here", + .{}, + ), + }); + } + const continue_block = gen_zir.continue_block; + if (continue_block == 0) { + scope = gen_zir.parent; + continue; + } + if (break_label != 0) blk: { + if (gen_zir.label) |*label| { + if (try astgen.tokenIdentEql(label.token, break_label)) { + label.used = true; + break :blk; + } + } + // found continue but either it has a different label, or no label + scope = gen_zir.parent; + continue; + } + + const break_tag: Zir.Inst.Tag = if (gen_zir.is_inline or gen_zir.force_comptime) + .break_inline + else + .@"break"; + if (break_tag == .break_inline) { + _ = try parent_gz.addUnNode(.check_comptime_control_flow, Zir.indexToRef(continue_block), node); + } + + // As our last action before the continue, "pop" the error trace if needed + if (!gen_zir.force_comptime) + _ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always); + + _ = try parent_gz.addBreak(break_tag, continue_block, .void_value); + return Zir.Inst.Ref.unreachable_value; + }, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .defer_normal => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + try parent_gz.addDefer(defer_scope.index, defer_scope.len); + }, + .defer_error => scope = scope.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => break, + .top => unreachable, + } + } + if (break_label != 0) { + const label_name = try astgen.identifierTokenString(break_label); + return astgen.failTok(break_label, "label not found: '{s}'", .{label_name}); + } else { + return astgen.failNode(node, "continue expression outside loop", .{}); + } +} + +fn blockExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + block_node: Ast.Node.Index, + statements: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const tracy = trace(@src()); + defer tracy.end(); + + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + const lbrace = main_tokens[block_node]; + if (token_tags[lbrace - 1] == .colon and + token_tags[lbrace - 2] == .identifier) + { + return labeledBlockExpr(gz, scope, ri, block_node, statements); + } + + if (!gz.force_comptime) { + // Since this block is unlabeled, its control flow is effectively linear and we + // can *almost* get away with inlining the block here. However, we actually need + // to preserve the .block for Sema, to properly pop the error return trace. 
+ + const block_tag: Zir.Inst.Tag = .block; + const block_inst = try gz.makeBlockInst(block_tag, block_node); + try gz.instructions.append(astgen.gpa, block_inst); + + var block_scope = gz.makeSubBlock(scope); + defer block_scope.unstack(); + + try blockExprStmts(&block_scope, &block_scope.base, statements); + + if (!block_scope.endsWithNoReturn()) { + // As our last action before the break, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always); + + const break_tag: Zir.Inst.Tag = if (block_scope.force_comptime) .break_inline else .@"break"; + _ = try block_scope.addBreak(break_tag, block_inst, .void_value); + } + + try block_scope.setBlockBody(block_inst); + } else { + var sub_gz = gz.makeSubBlock(scope); + try blockExprStmts(&sub_gz, &sub_gz.base, statements); + } + + return rvalue(gz, ri, .void_value, block_node); +} + +fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.TokenIndex) !void { + // Look for the label in the scope. + var scope = parent_scope; + while (true) { + switch (scope.tag) { + .gen_zir => { + const gen_zir = scope.cast(GenZir).?; + if (gen_zir.label) |prev_label| { + if (try astgen.tokenIdentEql(label, prev_label.token)) { + const label_name = try astgen.identifierTokenString(label); + return astgen.failTokNotes(label, "redefinition of label '{s}'", .{ + label_name, + }, &[_]u32{ + try astgen.errNoteTok( + prev_label.token, + "previous definition here", + .{}, + ), + }); + } + } + scope = gen_zir.parent; + }, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => break, + .top => unreachable, + } + } +} + +fn labeledBlockExpr( + gz: *GenZir, + parent_scope: *Scope, + ri: ResultInfo, + block_node: Ast.Node.Index, + statements: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const tracy = trace(@src()); + defer tracy.end(); + + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + const lbrace = main_tokens[block_node]; + const label_token = lbrace - 2; + assert(token_tags[label_token] == .identifier); + + try astgen.checkLabelRedefinition(parent_scope, label_token); + + // Reserve the Block ZIR instruction index so that we can put it into the GenZir struct + // so that break statements can reference it. 
+ const block_tag: Zir.Inst.Tag = if (gz.force_comptime) .block_inline else .block; + const block_inst = try gz.makeBlockInst(block_tag, block_node); + try gz.instructions.append(astgen.gpa, block_inst); + + var block_scope = gz.makeSubBlock(parent_scope); + block_scope.label = GenZir.Label{ + .token = label_token, + .block_inst = block_inst, + }; + block_scope.setBreakResultInfo(ri); + defer block_scope.unstack(); + defer block_scope.labeled_breaks.deinit(astgen.gpa); + + try blockExprStmts(&block_scope, &block_scope.base, statements); + if (!block_scope.endsWithNoReturn()) { + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always); + + const break_tag: Zir.Inst.Tag = if (block_scope.force_comptime) .break_inline else .@"break"; + _ = try block_scope.addBreak(break_tag, block_inst, .void_value); + } + + if (!block_scope.label.?.used) { + try astgen.appendErrorTok(label_token, "unused block label", .{}); + } + + const zir_datas = gz.astgen.instructions.items(.data); + const zir_tags = gz.astgen.instructions.items(.tag); + const strat = ri.rl.strategy(&block_scope); + switch (strat.tag) { + .break_void => { + // The code took advantage of the result location as a pointer. + // Turn the break instruction operands into void. + for (block_scope.labeled_breaks.items) |br| { + zir_datas[br.br].@"break".operand = .void_value; + } + try block_scope.setBlockBody(block_inst); + + return indexToRef(block_inst); + }, + .break_operand => { + // All break operands are values that did not use the result location pointer + // (except for a single .store_to_block_ptr inst which we re-write here). + // The break instructions need to have their operands coerced if the + // block's result location is a `ty`. In this case we overwrite the + // `store_to_block_ptr` instruction with an `as` instruction and repurpose + // it as the break operand. + // This corresponds to similar code in `setCondBrPayloadElideBlockStorePtr`. + if (block_scope.rl_ty_inst != .none) { + for (block_scope.labeled_breaks.items) |br| { + // We expect the `store_to_block_ptr` to be created between 1-3 instructions + // prior to the break. 
+ var search_index = br.search -| 3; + while (search_index < br.search) : (search_index += 1) { + if (zir_tags[search_index] == .store_to_block_ptr and + zir_datas[search_index].bin.lhs == block_scope.rl_ptr) + { + zir_tags[search_index] = .as; + zir_datas[search_index].bin = .{ + .lhs = block_scope.rl_ty_inst, + .rhs = zir_datas[br.br].@"break".operand, + }; + zir_datas[br.br].@"break".operand = indexToRef(search_index); + break; + } + } else unreachable; + } + } + try block_scope.setBlockBody(block_inst); + const block_ref = indexToRef(block_inst); + switch (ri.rl) { + .ref => return block_ref, + else => return rvalue(gz, ri, block_ref, block_node), + } + }, + } +} + +fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Node.Index) !void { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const node_data = tree.nodes.items(.data); + + if (statements.len == 0) return; + + try gz.addDbgBlockBegin(); + + var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa); + defer block_arena.deinit(); + const block_arena_allocator = block_arena.allocator(); + + var noreturn_src_node: Ast.Node.Index = 0; + var scope = parent_scope; + for (statements) |statement| { + if (noreturn_src_node != 0) { + try astgen.appendErrorNodeNotes( + statement, + "unreachable code", + .{}, + &[_]u32{ + try astgen.errNoteNode( + noreturn_src_node, + "control flow is diverted here", + .{}, + ), + }, + ); + } + var inner_node = statement; + while (true) { + switch (node_tags[inner_node]) { + // zig fmt: off + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.fullVarDecl(statement).?), + + .@"defer" => scope = try deferStmt(gz, scope, statement, block_arena_allocator, .defer_normal), + .@"errdefer" => scope = try deferStmt(gz, scope, statement, block_arena_allocator, .defer_error), + + .assign => try assign(gz, scope, statement), + + .assign_shl => try assignShift(gz, scope, statement, .shl), + .assign_shr => try assignShift(gz, scope, statement, .shr), + + .assign_bit_and => try assignOp(gz, scope, statement, .bit_and), + .assign_bit_or => try assignOp(gz, scope, statement, .bit_or), + .assign_bit_xor => try assignOp(gz, scope, statement, .xor), + .assign_div => try assignOp(gz, scope, statement, .div), + .assign_sub => try assignOp(gz, scope, statement, .sub), + .assign_sub_wrap => try assignOp(gz, scope, statement, .subwrap), + .assign_mod => try assignOp(gz, scope, statement, .mod_rem), + .assign_add => try assignOp(gz, scope, statement, .add), + .assign_add_wrap => try assignOp(gz, scope, statement, .addwrap), + .assign_mul => try assignOp(gz, scope, statement, .mul), + .assign_mul_wrap => try assignOp(gz, scope, statement, .mulwrap), + + .grouped_expression => { + inner_node = node_data[statement].lhs; + continue; + }, + + .while_simple, + .while_cont, + .@"while", => _ = try whileExpr(gz, scope, .{ .rl = .none }, inner_node, tree.fullWhile(inner_node).?, true), + + .for_simple, + .@"for", => _ = try forExpr(gz, scope, .{ .rl = .none }, inner_node, tree.fullFor(inner_node).?, true), + + else => noreturn_src_node = try unusedResultExpr(gz, scope, inner_node), + // zig fmt: on + } + break; + } + } + + try gz.addDbgBlockEnd(); + + try genDefers(gz, parent_scope, scope, .normal_only); + try checkUsed(gz, parent_scope, scope); +} + +/// Returns AST source node of the thing that is noreturn if the statement is +/// definitely `noreturn`. 
Otherwise returns 0. +fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) InnerError!Ast.Node.Index { + try emitDbgNode(gz, statement); + // We need to emit an error if the result is not `noreturn` or `void`, but + // we want to avoid adding the ZIR instruction if possible for performance. + const maybe_unused_result = try expr(gz, scope, .{ .rl = .none }, statement); + return addEnsureResult(gz, maybe_unused_result, statement); +} + +fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: Ast.Node.Index) InnerError!Ast.Node.Index { + var noreturn_src_node: Ast.Node.Index = 0; + const elide_check = if (refToIndex(maybe_unused_result)) |inst| b: { + // Note that this array becomes invalid after appending more items to it + // in the above while loop. + const zir_tags = gz.astgen.instructions.items(.tag); + switch (zir_tags[inst]) { + // For some instructions, modify the zir data + // so we can avoid a separate ensure_result_used instruction. + .call => { + const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index; + const slot = &gz.astgen.extra.items[extra_index]; + var flags = @bitCast(Zir.Inst.Call.Flags, slot.*); + flags.ensure_result_used = true; + slot.* = @bitCast(u32, flags); + break :b true; + }, + .builtin_call => { + const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index; + const slot = &gz.astgen.extra.items[extra_index]; + var flags = @bitCast(Zir.Inst.BuiltinCall.Flags, slot.*); + flags.ensure_result_used = true; + slot.* = @bitCast(u32, flags); + break :b true; + }, + + // ZIR instructions that might be a type other than `noreturn` or `void`. + .add, + .addwrap, + .add_sat, + .add_unsafe, + .param, + .param_comptime, + .param_anytype, + .param_anytype_comptime, + .alloc, + .alloc_mut, + .alloc_comptime_mut, + .alloc_inferred, + .alloc_inferred_mut, + .alloc_inferred_comptime, + .alloc_inferred_comptime_mut, + .make_ptr_const, + .array_cat, + .array_mul, + .array_type, + .array_type_sentinel, + .elem_type_index, + .vector_type, + .indexable_ptr_len, + .anyframe_type, + .as, + .as_node, + .as_shift_operand, + .bit_and, + .bitcast, + .bit_or, + .block, + .block_inline, + .suspend_block, + .loop, + .bool_br_and, + .bool_br_or, + .bool_not, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .coerce_result_ptr, + .decl_ref, + .decl_val, + .load, + .div, + .elem_ptr, + .elem_val, + .elem_ptr_node, + .elem_ptr_imm, + .elem_val_node, + .field_ptr, + .field_ptr_init, + .field_val, + .field_call_bind, + .field_ptr_named, + .field_val_named, + .func, + .func_inferred, + .func_fancy, + .int, + .int_big, + .float, + .float128, + .int_type, + .is_non_null, + .is_non_null_ptr, + .is_non_err, + .is_non_err_ptr, + .ret_is_non_err, + .mod_rem, + .mul, + .mulwrap, + .mul_sat, + .ref, + .shl, + .shl_sat, + .shr, + .str, + .sub, + .subwrap, + .sub_sat, + .negate, + .negate_wrap, + .typeof, + .typeof_builtin, + .xor, + .optional_type, + .optional_payload_safe, + .optional_payload_unsafe, + .optional_payload_safe_ptr, + .optional_payload_unsafe_ptr, + .err_union_payload_unsafe, + .err_union_payload_unsafe_ptr, + .err_union_code, + .err_union_code_ptr, + .ptr_type, + .enum_literal, + .merge_error_sets, + .error_union_type, + .bit_not, + .error_value, + .slice_start, + .slice_end, + .slice_sentinel, + .import, + .switch_block, + .switch_cond, + .switch_cond_ref, + .switch_capture, + .switch_capture_ref, + .switch_capture_multi, + .switch_capture_multi_ref, + .switch_capture_tag, + 
.struct_init_empty, + .struct_init, + .struct_init_ref, + .struct_init_anon, + .struct_init_anon_ref, + .array_init, + .array_init_anon, + .array_init_ref, + .array_init_anon_ref, + .union_init, + .field_type, + .field_type_ref, + .error_set_decl, + .error_set_decl_anon, + .error_set_decl_func, + .int_to_enum, + .enum_to_int, + .type_info, + .size_of, + .bit_size_of, + .typeof_log2_int_type, + .ptr_to_int, + .align_of, + .bool_to_int, + .embed_file, + .error_name, + .sqrt, + .sin, + .cos, + .tan, + .exp, + .exp2, + .log, + .log2, + .log10, + .fabs, + .floor, + .ceil, + .trunc, + .round, + .tag_name, + .type_name, + .frame_type, + .frame_size, + .float_to_int, + .int_to_float, + .int_to_ptr, + .float_cast, + .int_cast, + .ptr_cast, + .truncate, + .align_cast, + .has_decl, + .has_field, + .clz, + .ctz, + .pop_count, + .byte_swap, + .bit_reverse, + .div_exact, + .div_floor, + .div_trunc, + .mod, + .rem, + .shl_exact, + .shr_exact, + .bit_offset_of, + .offset_of, + .splat, + .reduce, + .shuffle, + .atomic_load, + .atomic_rmw, + .mul_add, + .field_parent_ptr, + .max, + .min, + .c_import, + .@"resume", + .@"await", + .ret_err_value_code, + .closure_get, + .array_base_ptr, + .field_base_ptr, + .ret_ptr, + .ret_type, + .for_len, + .@"try", + .try_ptr, + //.try_inline, + //.try_ptr_inline, + => break :b false, + + .extended => switch (gz.astgen.instructions.items(.data)[inst].extended.opcode) { + .breakpoint, + .fence, + .set_float_mode, + .set_align_stack, + .set_cold, + => break :b true, + else => break :b false, + }, + + // ZIR instructions that are always `noreturn`. + .@"break", + .break_inline, + .condbr, + .condbr_inline, + .compile_error, + .ret_node, + .ret_load, + .ret_implicit, + .ret_err_value, + .@"unreachable", + .repeat, + .repeat_inline, + .panic, + .panic_comptime, + .trap, + .check_comptime_control_flow, + => { + noreturn_src_node = statement; + break :b true; + }, + + // ZIR instructions that are always `void`. 
+ .dbg_stmt, + .dbg_var_ptr, + .dbg_var_val, + .dbg_block_begin, + .dbg_block_end, + .ensure_result_used, + .ensure_result_non_error, + .ensure_err_union_payload_void, + .@"export", + .export_value, + .set_eval_branch_quota, + .atomic_store, + .store, + .store_node, + .store_to_block_ptr, + .store_to_inferred_ptr, + .resolve_inferred_alloc, + .validate_struct_init, + .validate_struct_init_comptime, + .validate_array_init, + .validate_array_init_comptime, + .set_runtime_safety, + .closure_capture, + .memcpy, + .memset, + .validate_array_init_ty, + .validate_struct_init_ty, + .validate_deref, + .save_err_ret_index, + .restore_err_ret_index, + => break :b true, + + .@"defer" => unreachable, + .defer_err_code => unreachable, + } + } else switch (maybe_unused_result) { + .none => unreachable, + + .unreachable_value => b: { + noreturn_src_node = statement; + break :b true; + }, + + .void_value => true, + + else => false, + }; + if (!elide_check) { + _ = try gz.addUnNode(.ensure_result_used, maybe_unused_result, statement); + } + return noreturn_src_node; +} + +fn countDefers(outer_scope: *Scope, inner_scope: *Scope) struct { + have_any: bool, + have_normal: bool, + have_err: bool, + need_err_code: bool, +} { + var have_normal = false; + var have_err = false; + var need_err_code = false; + var scope = inner_scope; + while (scope != outer_scope) { + switch (scope.tag) { + .gen_zir => scope = scope.cast(GenZir).?.parent, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .defer_normal => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + + have_normal = true; + }, + .defer_error => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + + have_err = true; + + const have_err_payload = defer_scope.remapped_err_code != 0; + need_err_code = need_err_code or have_err_payload; + }, + .namespace, .enum_namespace => unreachable, + .top => unreachable, + } + } + return .{ + .have_any = have_normal or have_err, + .have_normal = have_normal, + .have_err = have_err, + .need_err_code = need_err_code, + }; +} + +const DefersToEmit = union(enum) { + both: Zir.Inst.Ref, // err code + both_sans_err, + normal_only, +}; + +fn genDefers( + gz: *GenZir, + outer_scope: *Scope, + inner_scope: *Scope, + which_ones: DefersToEmit, +) InnerError!void { + const gpa = gz.astgen.gpa; + + var scope = inner_scope; + while (scope != outer_scope) { + switch (scope.tag) { + .gen_zir => scope = scope.cast(GenZir).?.parent, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .defer_normal => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + try gz.addDefer(defer_scope.index, defer_scope.len); + }, + .defer_error => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + switch (which_ones) { + .both_sans_err => { + try gz.addDefer(defer_scope.index, defer_scope.len); + }, + .both => |err_code| { + if (defer_scope.remapped_err_code == 0) { + try gz.addDefer(defer_scope.index, defer_scope.len); + } else { + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const payload_index = try gz.astgen.addExtra(Zir.Inst.DeferErrCode{ + .remapped_err_code = defer_scope.remapped_err_code, + .index = defer_scope.index, + .len = defer_scope.len, + }); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + 
gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = .defer_err_code, + .data = .{ .defer_err_code = .{ + .err_code = err_code, + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + } + }, + .normal_only => continue, + } + }, + .namespace, .enum_namespace => unreachable, + .top => unreachable, + } + } +} + +fn checkUsed(gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope) InnerError!void { + const astgen = gz.astgen; + + var scope = inner_scope; + while (scope != outer_scope) { + switch (scope.tag) { + .gen_zir => scope = scope.cast(GenZir).?.parent, + .local_val => { + const s = scope.cast(Scope.LocalVal).?; + if (s.used == 0 and s.discarded == 0) { + try astgen.appendErrorTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)}); + } else if (s.used != 0 and s.discarded != 0) { + try astgen.appendErrorTokNotes(s.discarded, "pointless discard of {s}", .{@tagName(s.id_cat)}, &[_]u32{ + try gz.astgen.errNoteTok(s.used, "used here", .{}), + }); + } + scope = s.parent; + }, + .local_ptr => { + const s = scope.cast(Scope.LocalPtr).?; + if (s.used == 0 and s.discarded == 0) { + try astgen.appendErrorTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)}); + } else if (s.used != 0 and s.discarded != 0) { + try astgen.appendErrorTokNotes(s.discarded, "pointless discard of {s}", .{@tagName(s.id_cat)}, &[_]u32{ + try gz.astgen.errNoteTok(s.used, "used here", .{}), + }); + } + scope = s.parent; + }, + .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => unreachable, + .top => unreachable, + } + } +} + +fn deferStmt( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + block_arena: Allocator, + scope_tag: Scope.Tag, +) InnerError!*Scope { + var defer_gen = gz.makeSubBlock(scope); + defer_gen.cur_defer_node = node; + defer_gen.any_defer_node = node; + defer defer_gen.unstack(); + + const tree = gz.astgen.tree; + const node_datas = tree.nodes.items(.data); + const expr_node = node_datas[node].rhs; + + const payload_token = node_datas[node].lhs; + var local_val_scope: Scope.LocalVal = undefined; + var remapped_err_code: Zir.Inst.Index = 0; + const have_err_code = scope_tag == .defer_error and payload_token != 0; + const sub_scope = if (!have_err_code) &defer_gen.base else blk: { + try gz.addDbgBlockBegin(); + const ident_name = try gz.astgen.identAsString(payload_token); + remapped_err_code = @intCast(u32, try gz.astgen.instructions.addOne(gz.astgen.gpa)); + const remapped_err_code_ref = Zir.indexToRef(remapped_err_code); + local_val_scope = .{ + .parent = &defer_gen.base, + .gen_zir = gz, + .name = ident_name, + .inst = remapped_err_code_ref, + .token_src = payload_token, + .id_cat = .capture, + }; + try gz.addDbgVar(.dbg_var_val, ident_name, remapped_err_code_ref); + break :blk &local_val_scope.base; + }; + _ = try unusedResultExpr(&defer_gen, sub_scope, expr_node); + try checkUsed(gz, scope, sub_scope); + if (have_err_code) try gz.addDbgBlockEnd(); + _ = try defer_gen.addBreak(.break_inline, 0, .void_value); + + const body = defer_gen.instructionsSlice(); + const body_len = gz.astgen.countBodyLenAfterFixups(body); + + const index = @intCast(u32, gz.astgen.extra.items.len); + try gz.astgen.extra.ensureUnusedCapacity(gz.astgen.gpa, body_len); + gz.astgen.appendBodyWithFixups(body); + + const defer_scope = try block_arena.create(Scope.Defer); + + defer_scope.* = .{ + .base = .{ .tag = scope_tag }, + .parent = scope, + .index = index, + .len = body_len, + .remapped_err_code = remapped_err_code, + }; + 
return &defer_scope.base; +} + +fn varDecl( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + block_arena: Allocator, + var_decl: Ast.full.VarDecl, +) InnerError!*Scope { + try emitDbgNode(gz, node); + const astgen = gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + + const name_token = var_decl.ast.mut_token + 1; + const ident_name_raw = tree.tokenSlice(name_token); + if (mem.eql(u8, ident_name_raw, "_")) { + return astgen.failTok(name_token, "'_' used as an identifier without @\"_\" syntax", .{}); + } + const ident_name = try astgen.identAsString(name_token); + + try astgen.detectLocalShadowing( + scope, + ident_name, + name_token, + ident_name_raw, + if (token_tags[var_decl.ast.mut_token] == .keyword_const) .@"local constant" else .@"local variable", + ); + + if (var_decl.ast.init_node == 0) { + return astgen.failNode(node, "variables must be initialized", .{}); + } + + if (var_decl.ast.addrspace_node != 0) { + return astgen.failTok(main_tokens[var_decl.ast.addrspace_node], "cannot set address space of local variable '{s}'", .{ident_name_raw}); + } + + if (var_decl.ast.section_node != 0) { + return astgen.failTok(main_tokens[var_decl.ast.section_node], "cannot set section of local variable '{s}'", .{ident_name_raw}); + } + + const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node != 0) + try expr(gz, scope, align_ri, var_decl.ast.align_node) + else + .none; + + switch (token_tags[var_decl.ast.mut_token]) { + .keyword_const => { + if (var_decl.comptime_token) |comptime_token| { + try astgen.appendErrorTok(comptime_token, "'comptime const' is redundant; instead wrap the initialization expression with 'comptime'", .{}); + } + + // Depending on the type of AST the initialization expression is, we may need an lvalue + // or an rvalue as a result location. If it is an rvalue, we can use the instruction as + // the variable, no memory location needed. + const type_node = var_decl.ast.type_node; + if (align_inst == .none and + !nodeMayNeedMemoryLocation(tree, var_decl.ast.init_node, type_node != 0)) + { + const result_info: ResultInfo = if (type_node != 0) .{ + .rl = .{ .ty = try typeExpr(gz, scope, type_node) }, + .ctx = .const_init, + } else .{ .rl = .none, .ctx = .const_init }; + const prev_anon_name_strategy = gz.anon_name_strategy; + gz.anon_name_strategy = .dbg_var; + const init_inst = try reachableExpr(gz, scope, result_info, var_decl.ast.init_node, node); + gz.anon_name_strategy = prev_anon_name_strategy; + + try gz.addDbgVar(.dbg_var_val, ident_name, init_inst); + + // The const init expression may have modified the error return trace, so signal + // to Sema that it should save the new index for restoring later. + if (nodeMayAppendToErrorTrace(tree, var_decl.ast.init_node)) + _ = try gz.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); + + const sub_scope = try block_arena.create(Scope.LocalVal); + sub_scope.* = .{ + .parent = scope, + .gen_zir = gz, + .name = ident_name, + .inst = init_inst, + .token_src = name_token, + .id_cat = .@"local constant", + }; + return &sub_scope.base; + } + + const is_comptime = gz.force_comptime or + tree.nodes.items(.tag)[var_decl.ast.init_node] == .@"comptime"; + + // Detect whether the initialization expression actually uses the + // result location pointer. 
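+ // If the pointer turns out to be unused (checked via `rvalue_rl_count` below),
+ // the alloc and its `store_to_block_ptr` are elided and the init value is used directly.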
+ var init_scope = gz.makeSubBlock(scope); + // we may add more instructions to gz before stacking init_scope + init_scope.instructions_top = GenZir.unstacked_top; + init_scope.anon_name_strategy = .dbg_var; + defer init_scope.unstack(); + + var resolve_inferred_alloc: Zir.Inst.Ref = .none; + var opt_type_inst: Zir.Inst.Ref = .none; + if (type_node != 0) { + const type_inst = try typeExpr(gz, &init_scope.base, type_node); + opt_type_inst = type_inst; + if (align_inst == .none) { + init_scope.instructions_top = gz.instructions.items.len; + init_scope.rl_ptr = try init_scope.addUnNode(.alloc, type_inst, node); + } else { + init_scope.rl_ptr = try gz.addAllocExtended(.{ + .node = node, + .type_inst = type_inst, + .align_inst = align_inst, + .is_const = true, + .is_comptime = is_comptime, + }); + init_scope.instructions_top = gz.instructions.items.len; + } + init_scope.rl_ty_inst = type_inst; + } else { + const alloc = if (align_inst == .none) alloc: { + init_scope.instructions_top = gz.instructions.items.len; + const tag: Zir.Inst.Tag = if (is_comptime) + .alloc_inferred_comptime + else + .alloc_inferred; + break :alloc try init_scope.addNode(tag, node); + } else alloc: { + const ref = try gz.addAllocExtended(.{ + .node = node, + .type_inst = .none, + .align_inst = align_inst, + .is_const = true, + .is_comptime = is_comptime, + }); + init_scope.instructions_top = gz.instructions.items.len; + break :alloc ref; + }; + resolve_inferred_alloc = alloc; + init_scope.rl_ptr = alloc; + init_scope.rl_ty_inst = .none; + } + const init_result_info: ResultInfo = .{ .rl = .{ .block_ptr = &init_scope }, .ctx = .const_init }; + const init_inst = try reachableExpr(&init_scope, &init_scope.base, init_result_info, var_decl.ast.init_node, node); + + // The const init expression may have modified the error return trace, so signal + // to Sema that it should save the new index for restoring later. + if (nodeMayAppendToErrorTrace(tree, var_decl.ast.init_node)) + _ = try init_scope.addSaveErrRetIndex(.{ .if_of_error_type = init_inst }); + + const zir_tags = astgen.instructions.items(.tag); + const zir_datas = astgen.instructions.items(.data); + + if (align_inst == .none and init_scope.rvalue_rl_count == 1) { + // Result location pointer not used. We don't need an alloc for this + // const local, and type inference becomes trivial. + // Implicitly move the init_scope instructions into the parent scope, + // then elide the alloc instruction and the store_to_block_ptr instruction. + var src = init_scope.instructions_top; + var dst = src; + init_scope.instructions_top = GenZir.unstacked_top; + while (src < gz.instructions.items.len) : (src += 1) { + const src_inst = gz.instructions.items[src]; + if (indexToRef(src_inst) == init_scope.rl_ptr) continue; + if (zir_tags[src_inst] == .store_to_block_ptr) { + if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) continue; + } + gz.instructions.items[dst] = src_inst; + dst += 1; + } + gz.instructions.items.len = dst; + + // In case the result location did not do the coercion + // for us so we must do it here. 
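+ // i.e. when an explicit type annotation was given, wrap the init value in an `as`.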
+ const coerced_init = if (opt_type_inst != .none) + try gz.addBin(.as, opt_type_inst, init_inst) + else + init_inst; + + try gz.addDbgVar(.dbg_var_val, ident_name, coerced_init); + + const sub_scope = try block_arena.create(Scope.LocalVal); + sub_scope.* = .{ + .parent = scope, + .gen_zir = gz, + .name = ident_name, + .inst = coerced_init, + .token_src = name_token, + .id_cat = .@"local constant", + }; + return &sub_scope.base; + } + // The initialization expression took advantage of the result location + // of the const local. In this case we will create an alloc and a LocalPtr for it. + // Implicitly move the init_scope instructions into the parent scope, then swap + // store_to_block_ptr for store_to_inferred_ptr. + + var src = init_scope.instructions_top; + init_scope.instructions_top = GenZir.unstacked_top; + while (src < gz.instructions.items.len) : (src += 1) { + const src_inst = gz.instructions.items[src]; + if (zir_tags[src_inst] == .store_to_block_ptr) { + if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) { + if (type_node != 0) { + zir_tags[src_inst] = .store; + } else { + zir_tags[src_inst] = .store_to_inferred_ptr; + } + } + } + } + if (resolve_inferred_alloc != .none) { + _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node); + } + const const_ptr = try gz.addUnNode(.make_ptr_const, init_scope.rl_ptr, node); + + try gz.addDbgVar(.dbg_var_ptr, ident_name, const_ptr); + + const sub_scope = try block_arena.create(Scope.LocalPtr); + sub_scope.* = .{ + .parent = scope, + .gen_zir = gz, + .name = ident_name, + .ptr = const_ptr, + .token_src = name_token, + .maybe_comptime = true, + .id_cat = .@"local constant", + }; + return &sub_scope.base; + }, + .keyword_var => { + const old_rl_ty_inst = gz.rl_ty_inst; + defer gz.rl_ty_inst = old_rl_ty_inst; + + const is_comptime = var_decl.comptime_token != null or gz.force_comptime; + var resolve_inferred_alloc: Zir.Inst.Ref = .none; + const var_data: struct { + result_info: ResultInfo, + alloc: Zir.Inst.Ref, + } = if (var_decl.ast.type_node != 0) a: { + const type_inst = try typeExpr(gz, scope, var_decl.ast.type_node); + const alloc = alloc: { + if (align_inst == .none) { + const tag: Zir.Inst.Tag = if (is_comptime) + .alloc_comptime_mut + else + .alloc_mut; + break :alloc try gz.addUnNode(tag, type_inst, node); + } else { + break :alloc try gz.addAllocExtended(.{ + .node = node, + .type_inst = type_inst, + .align_inst = align_inst, + .is_const = false, + .is_comptime = is_comptime, + }); + } + }; + gz.rl_ty_inst = type_inst; + break :a .{ .alloc = alloc, .result_info = .{ .rl = .{ .ptr = .{ .inst = alloc } } } }; + } else a: { + const alloc = alloc: { + if (align_inst == .none) { + const tag: Zir.Inst.Tag = if (is_comptime) + .alloc_inferred_comptime_mut + else + .alloc_inferred_mut; + break :alloc try gz.addNode(tag, node); + } else { + break :alloc try gz.addAllocExtended(.{ + .node = node, + .type_inst = .none, + .align_inst = align_inst, + .is_const = false, + .is_comptime = is_comptime, + }); + } + }; + gz.rl_ty_inst = .none; + resolve_inferred_alloc = alloc; + break :a .{ .alloc = alloc, .result_info = .{ .rl = .{ .inferred_ptr = alloc } } }; + }; + const prev_anon_name_strategy = gz.anon_name_strategy; + gz.anon_name_strategy = .dbg_var; + _ = try reachableExprComptime(gz, scope, var_data.result_info, var_decl.ast.init_node, node, is_comptime); + gz.anon_name_strategy = prev_anon_name_strategy; + if (resolve_inferred_alloc != .none) { + _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, 
node); + } + + try gz.addDbgVar(.dbg_var_ptr, ident_name, var_data.alloc); + + const sub_scope = try block_arena.create(Scope.LocalPtr); + sub_scope.* = .{ + .parent = scope, + .gen_zir = gz, + .name = ident_name, + .ptr = var_data.alloc, + .token_src = name_token, + .maybe_comptime = is_comptime, + .id_cat = .@"local variable", + }; + return &sub_scope.base; + }, + else => unreachable, + } +} + +fn emitDbgNode(gz: *GenZir, node: Ast.Node.Index) !void { + // The instruction emitted here is for debugging runtime code. + // If the current block will be evaluated only during semantic analysis + // then no dbg_stmt ZIR instruction is needed. + if (gz.force_comptime) return; + + const astgen = gz.astgen; + astgen.advanceSourceCursorToNode(node); + const line = astgen.source_line - gz.decl_line; + const column = astgen.source_column; + + if (gz.instructions.items.len > 0) { + const last = gz.instructions.items[gz.instructions.items.len - 1]; + const zir_tags = astgen.instructions.items(.tag); + if (zir_tags[last] == .dbg_stmt) { + const zir_datas = astgen.instructions.items(.data); + zir_datas[last].dbg_stmt = .{ + .line = line, + .column = column, + }; + return; + } + } + + _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ + .dbg_stmt = .{ + .line = line, + .column = column, + }, + } }); +} + +fn assign(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const node_tags = tree.nodes.items(.tag); + + const lhs = node_datas[infix_node].lhs; + const rhs = node_datas[infix_node].rhs; + if (node_tags[lhs] == .identifier) { + // This intentionally does not support `@"_"` syntax. 
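+ // For example, `_ = foo();` lowers the RHS with a `.discard` result location
+ // instead of storing through an lvalue.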
+ const ident_name = tree.tokenSlice(main_tokens[lhs]); + if (mem.eql(u8, ident_name, "_")) { + _ = try expr(gz, scope, .{ .rl = .discard, .ctx = .assignment }, rhs); + return; + } + } + const lvalue = try lvalExpr(gz, scope, lhs); + _ = try expr(gz, scope, .{ .rl = .{ .ptr = .{ + .inst = lvalue, + .src_node = infix_node, + } } }, rhs); +} + +fn assignOp( + gz: *GenZir, + scope: *Scope, + infix_node: Ast.Node.Index, + op_inst_tag: Zir.Inst.Tag, +) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); + + var line: u32 = undefined; + var column: u32 = undefined; + switch (op_inst_tag) { + .add, .sub, .mul, .div, .mod_rem => { + maybeAdvanceSourceCursorToMainToken(gz, infix_node); + line = gz.astgen.source_line - gz.decl_line; + column = gz.astgen.source_column; + }, + else => {}, + } + const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); + const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node); + const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = lhs_type } }, node_datas[infix_node].rhs); + + switch (op_inst_tag) { + .add, .sub, .mul, .div, .mod_rem => { + try emitDbgStmt(gz, line, column); + }, + else => {}, + } + const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + }); + _ = try gz.addBin(.store, lhs_ptr, result); +} + +fn assignShift( + gz: *GenZir, + scope: *Scope, + infix_node: Ast.Node.Index, + op_inst_tag: Zir.Inst.Tag, +) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); + const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); + const rhs_type = try gz.addUnNode(.typeof_log2_int_type, lhs, infix_node); + const rhs = try expr(gz, scope, .{ .rl = .{ .ty = rhs_type } }, node_datas[infix_node].rhs); + + const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + }); + _ = try gz.addBin(.store, lhs_ptr, result); +} + +fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); + const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); + // Saturating shift-left allows any integer type for both the LHS and RHS. 
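+ // For example, `x <<|= y;` — unlike ordinary shifts, the RHS is lowered with no
+ // result type rather than being coerced to the log2 integer type.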
+ const rhs = try expr(gz, scope, .{ .rl = .none }, node_datas[infix_node].rhs); + + const result = try gz.addPlNode(.shl_sat, infix_node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + }); + _ = try gz.addBin(.store, lhs_ptr, result); +} + +fn ptrType( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + ptr_info: Ast.full.PtrType, +) InnerError!Zir.Inst.Ref { + if (ptr_info.size == .C and ptr_info.allowzero_token != null) { + return gz.astgen.failTok(ptr_info.allowzero_token.?, "C pointers always allow address zero", .{}); + } + + const source_offset = gz.astgen.source_offset; + const source_line = gz.astgen.source_line; + const source_column = gz.astgen.source_column; + const elem_type = try typeExpr(gz, scope, ptr_info.ast.child_type); + + var sentinel_ref: Zir.Inst.Ref = .none; + var align_ref: Zir.Inst.Ref = .none; + var addrspace_ref: Zir.Inst.Ref = .none; + var bit_start_ref: Zir.Inst.Ref = .none; + var bit_end_ref: Zir.Inst.Ref = .none; + var trailing_count: u32 = 0; + + if (ptr_info.ast.sentinel != 0) { + // These attributes can appear in any order and they all come before the + // element type so we need to reset the source cursor before generating them. + gz.astgen.source_offset = source_offset; + gz.astgen.source_line = source_line; + gz.astgen.source_column = source_column; + + sentinel_ref = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = elem_type } }, ptr_info.ast.sentinel); + trailing_count += 1; + } + if (ptr_info.ast.addrspace_node != 0) { + gz.astgen.source_offset = source_offset; + gz.astgen.source_line = source_line; + gz.astgen.source_column = source_column; + + addrspace_ref = try expr(gz, scope, .{ .rl = .{ .ty = .address_space_type } }, ptr_info.ast.addrspace_node); + trailing_count += 1; + } + if (ptr_info.ast.align_node != 0) { + gz.astgen.source_offset = source_offset; + gz.astgen.source_line = source_line; + gz.astgen.source_column = source_column; + + align_ref = try expr(gz, scope, coerced_align_ri, ptr_info.ast.align_node); + trailing_count += 1; + } + if (ptr_info.ast.bit_range_start != 0) { + assert(ptr_info.ast.bit_range_end != 0); + bit_start_ref = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, ptr_info.ast.bit_range_start); + bit_end_ref = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, ptr_info.ast.bit_range_end); + trailing_count += 2; + } + + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.PtrType).Struct.fields.len + + trailing_count); + + const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.PtrType{ + .elem_type = elem_type, + .src_node = gz.nodeIndexToRelative(node), + }); + if (sentinel_ref != .none) { + gz.astgen.extra.appendAssumeCapacity(@enumToInt(sentinel_ref)); + } + if (align_ref != .none) { + gz.astgen.extra.appendAssumeCapacity(@enumToInt(align_ref)); + } + if (addrspace_ref != .none) { + gz.astgen.extra.appendAssumeCapacity(@enumToInt(addrspace_ref)); + } + if (bit_start_ref != .none) { + gz.astgen.extra.appendAssumeCapacity(@enumToInt(bit_start_ref)); + gz.astgen.extra.appendAssumeCapacity(@enumToInt(bit_end_ref)); + } + + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const result = indexToRef(new_index); + gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{ + .ptr_type = .{ + .flags = .{ + .is_allowzero = ptr_info.allowzero_token != null, + .is_mutable = 
ptr_info.const_token == null, + .is_volatile = ptr_info.volatile_token != null, + .has_sentinel = sentinel_ref != .none, + .has_align = align_ref != .none, + .has_addrspace = addrspace_ref != .none, + .has_bit_range = bit_start_ref != .none, + }, + .size = ptr_info.size, + .payload_index = payload_index, + }, + } }); + gz.instructions.appendAssumeCapacity(new_index); + + return rvalue(gz, ri, result, node); +} + +fn arrayType(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) !Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + + const len_node = node_datas[node].lhs; + if (node_tags[len_node] == .identifier and + mem.eql(u8, tree.tokenSlice(main_tokens[len_node]), "_")) + { + return astgen.failNode(len_node, "unable to infer array size", .{}); + } + const len = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, len_node); + const elem_type = try typeExpr(gz, scope, node_datas[node].rhs); + + const result = try gz.addPlNode(.array_type, node, Zir.Inst.Bin{ + .lhs = len, + .rhs = elem_type, + }); + return rvalue(gz, ri, result, node); +} + +fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) !Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.ArrayTypeSentinel); + + const len_node = node_datas[node].lhs; + if (node_tags[len_node] == .identifier and + mem.eql(u8, tree.tokenSlice(main_tokens[len_node]), "_")) + { + return astgen.failNode(len_node, "unable to infer array size", .{}); + } + const len = try reachableExpr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, len_node, node); + const elem_type = try typeExpr(gz, scope, extra.elem_type); + const sentinel = try reachableExprComptime(gz, scope, .{ .rl = .{ .coerced_ty = elem_type } }, extra.sentinel, node, true); + + const result = try gz.addPlNode(.array_type_sentinel, node, Zir.Inst.ArrayTypeSentinel{ + .len = len, + .elem_type = elem_type, + .sentinel = sentinel, + }); + return rvalue(gz, ri, result, node); +} + +const WipMembers = struct { + payload: *ArrayListUnmanaged(u32), + payload_top: usize, + decls_start: u32, + decls_end: u32, + field_bits_start: u32, + fields_start: u32, + fields_end: u32, + decl_index: u32 = 0, + field_index: u32 = 0, + + const Self = @This(); + /// struct, union, enum, and opaque decls all use same 4 bits per decl + const bits_per_decl = 4; + const decls_per_u32 = 32 / bits_per_decl; + /// struct, union, enum, and opaque decls all have maximum size of 11 u32 slots + /// (4 for src_hash + line + name + value + doc_comment + align + link_section + address_space ) + const max_decl_size = 11; + + fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self { + const payload_top = @intCast(u32, payload.items.len); + const decls_start = payload_top + (decl_count + decls_per_u32 - 1) / decls_per_u32; + const field_bits_start = decls_start + decl_count * max_decl_size; + const fields_start = field_bits_start + if (bits_per_field > 0) blk: { + const fields_per_u32 = 32 / bits_per_field; + break :blk (field_count + fields_per_u32 - 1) / fields_per_u32; + } 
else 0; + const payload_end = fields_start + field_count * max_field_size; + try payload.resize(gpa, payload_end); + return Self{ + .payload = payload, + .payload_top = payload_top, + .decls_start = decls_start, + .field_bits_start = field_bits_start, + .fields_start = fields_start, + .decls_end = decls_start, + .fields_end = fields_start, + }; + } + + fn nextDecl(self: *Self, is_pub: bool, is_export: bool, has_align: bool, has_section_or_addrspace: bool) void { + const index = self.payload_top + self.decl_index / decls_per_u32; + assert(index < self.decls_start); + const bit_bag: u32 = if (self.decl_index % decls_per_u32 == 0) 0 else self.payload.items[index]; + self.payload.items[index] = (bit_bag >> bits_per_decl) | + (@as(u32, @boolToInt(is_pub)) << 28) | + (@as(u32, @boolToInt(is_export)) << 29) | + (@as(u32, @boolToInt(has_align)) << 30) | + (@as(u32, @boolToInt(has_section_or_addrspace)) << 31); + self.decl_index += 1; + } + + fn nextField(self: *Self, comptime bits_per_field: u32, bits: [bits_per_field]bool) void { + const fields_per_u32 = 32 / bits_per_field; + const index = self.field_bits_start + self.field_index / fields_per_u32; + assert(index < self.fields_start); + var bit_bag: u32 = if (self.field_index % fields_per_u32 == 0) 0 else self.payload.items[index]; + bit_bag >>= bits_per_field; + comptime var i = 0; + inline while (i < bits_per_field) : (i += 1) { + bit_bag |= @as(u32, @boolToInt(bits[i])) << (32 - bits_per_field + i); + } + self.payload.items[index] = bit_bag; + self.field_index += 1; + } + + fn appendToDecl(self: *Self, data: u32) void { + assert(self.decls_end < self.field_bits_start); + self.payload.items[self.decls_end] = data; + self.decls_end += 1; + } + + fn appendToDeclSlice(self: *Self, data: []const u32) void { + assert(self.decls_end + data.len <= self.field_bits_start); + mem.copy(u32, self.payload.items[self.decls_end..], data); + self.decls_end += @intCast(u32, data.len); + } + + fn appendToField(self: *Self, data: u32) void { + assert(self.fields_end < self.payload.items.len); + self.payload.items[self.fields_end] = data; + self.fields_end += 1; + } + + fn finishBits(self: *Self, comptime bits_per_field: u32) void { + const empty_decl_slots = decls_per_u32 - (self.decl_index % decls_per_u32); + if (self.decl_index > 0 and empty_decl_slots < decls_per_u32) { + const index = self.payload_top + self.decl_index / decls_per_u32; + self.payload.items[index] >>= @intCast(u5, empty_decl_slots * bits_per_decl); + } + if (bits_per_field > 0) { + const fields_per_u32 = 32 / bits_per_field; + const empty_field_slots = fields_per_u32 - (self.field_index % fields_per_u32); + if (self.field_index > 0 and empty_field_slots < fields_per_u32) { + const index = self.field_bits_start + self.field_index / fields_per_u32; + self.payload.items[index] >>= @intCast(u5, empty_field_slots * bits_per_field); + } + } + } + + fn declsSlice(self: *Self) []u32 { + return self.payload.items[self.payload_top..self.decls_end]; + } + + fn fieldsSlice(self: *Self) []u32 { + return self.payload.items[self.field_bits_start..self.fields_end]; + } + + fn deinit(self: *Self) void { + self.payload.items.len = self.payload_top; + } +}; + +fn fnDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + decl_node: Ast.Node.Index, + body_node: Ast.Node.Index, + fn_proto: Ast.full.FnProto, +) InnerError!void { + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + // missing function name already happened in scanDecls() + const fn_name_token = 
fn_proto.name_token orelse return error.AnalysisFail; + const fn_name_str_index = try astgen.identAsString(fn_name_token); + + // We insert this at the beginning so that its instruction index marks the + // start of the top level declaration. + const block_inst = try gz.makeBlockInst(.block_inline, fn_proto.ast.proto_node); + astgen.advanceSourceCursorToNode(decl_node); + + var decl_gz: GenZir = .{ + .force_comptime = true, + .decl_node_index = fn_proto.ast.proto_node, + .decl_line = astgen.source_line, + .parent = scope, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer decl_gz.unstack(); + + var fn_gz: GenZir = .{ + .force_comptime = false, + .decl_node_index = fn_proto.ast.proto_node, + .decl_line = decl_gz.decl_line, + .parent = &decl_gz.base, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = GenZir.unstacked_top, + }; + defer fn_gz.unstack(); + + const is_pub = fn_proto.visib_token != null; + const is_export = blk: { + const maybe_export_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_export_token] == .keyword_export; + }; + const is_extern = blk: { + const maybe_extern_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_extern_token] == .keyword_extern; + }; + const has_inline_keyword = blk: { + const maybe_inline_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_inline_token] == .keyword_inline; + }; + const is_noinline = blk: { + const maybe_noinline_token = fn_proto.extern_export_inline_token orelse break :blk false; + break :blk token_tags[maybe_noinline_token] == .keyword_noinline; + }; + + const doc_comment_index = try astgen.docCommentAsString(fn_proto.firstToken()); + + // align, linksection, and addrspace is passed in the func instruction in this case. 
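+ // Each decl contributes four flag bits to WipMembers' shared bit bag
+ // (is_pub, is_export, has_align, has_section_or_addrspace), eight decls
+ // per u32. For a function the last two stay false, since those attributes
+ // travel in the func instruction as noted above.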
+ wip_members.nextDecl(is_pub, is_export, false, false); + + var noalias_bits: u32 = 0; + var params_scope = &fn_gz.base; + const is_var_args = is_var_args: { + var param_type_i: usize = 0; + var it = fn_proto.iterate(tree); + while (it.next()) |param| : (param_type_i += 1) { + const is_comptime = if (param.comptime_noalias) |token| switch (token_tags[token]) { + .keyword_noalias => is_comptime: { + noalias_bits |= @as(u32, 1) << (std.math.cast(u5, param_type_i) orelse + return astgen.failTok(token, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); + break :is_comptime false; + }, + .keyword_comptime => true, + else => false, + } else false; + + const is_anytype = if (param.anytype_ellipsis3) |token| blk: { + switch (token_tags[token]) { + .keyword_anytype => break :blk true, + .ellipsis3 => break :is_var_args true, + else => unreachable, + } + } else false; + + const param_name: u32 = if (param.name_token) |name_token| blk: { + const name_bytes = tree.tokenSlice(name_token); + if (mem.eql(u8, "_", name_bytes)) + break :blk 0; + + const param_name = try astgen.identAsString(name_token); + if (!is_extern) { + try astgen.detectLocalShadowing(params_scope, param_name, name_token, name_bytes, .@"function parameter"); + } + break :blk param_name; + } else if (!is_extern) { + if (param.anytype_ellipsis3) |tok| { + return astgen.failTok(tok, "missing parameter name", .{}); + } else { + ambiguous: { + if (tree.nodes.items(.tag)[param.type_expr] != .identifier) break :ambiguous; + const main_token = tree.nodes.items(.main_token)[param.type_expr]; + const identifier_str = tree.tokenSlice(main_token); + if (isPrimitive(identifier_str)) break :ambiguous; + return astgen.failNodeNotes( + param.type_expr, + "missing parameter name or type", + .{}, + &[_]u32{ + try astgen.errNoteNode( + param.type_expr, + "if this is a name, annotate its type '{s}: T'", + .{identifier_str}, + ), + try astgen.errNoteNode( + param.type_expr, + "if this is a type, give it a name ': {s}'", + .{identifier_str}, + ), + }, + ); + } + return astgen.failNode(param.type_expr, "missing parameter name", .{}); + } + } else 0; + + const param_inst = if (is_anytype) param: { + const name_token = param.name_token orelse param.anytype_ellipsis3.?; + const tag: Zir.Inst.Tag = if (is_comptime) + .param_anytype_comptime + else + .param_anytype; + break :param try decl_gz.addStrTok(tag, param_name, name_token); + } else param: { + const param_type_node = param.type_expr; + assert(param_type_node != 0); + var param_gz = decl_gz.makeSubBlock(scope); + defer param_gz.unstack(); + const param_type = try expr(¶m_gz, params_scope, coerced_type_ri, param_type_node); + const param_inst_expected = @intCast(u32, astgen.instructions.len + 1); + _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node); + + const main_tokens = tree.nodes.items(.main_token); + const name_token = param.name_token orelse main_tokens[param_type_node]; + const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param; + const param_inst = try decl_gz.addParam(¶m_gz, tag, name_token, param_name, param.first_doc_comment); + assert(param_inst_expected == param_inst); + break :param indexToRef(param_inst); + }; + + if (param_name == 0 or is_extern) continue; + + const sub_scope = try astgen.arena.create(Scope.LocalVal); + sub_scope.* = .{ + .parent = params_scope, + .gen_zir = &decl_gz, + .name = param_name, + .inst = param_inst, + .token_src = param.name_token.?, + .id_cat = .@"function 
parameter", + }; + params_scope = &sub_scope.base; + } + break :is_var_args false; + }; + + const lib_name: u32 = if (fn_proto.lib_name) |lib_name_token| blk: { + const lib_name_str = try astgen.strLitAsString(lib_name_token); + const lib_name_slice = astgen.string_bytes.items[lib_name_str.index..][0..lib_name_str.len]; + if (mem.indexOfScalar(u8, lib_name_slice, 0) != null) { + return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{}); + } else if (lib_name_str.len == 0) { + return astgen.failTok(lib_name_token, "library name cannot be empty", .{}); + } + break :blk lib_name_str.index; + } else 0; + + const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1; + const is_inferred_error = token_tags[maybe_bang] == .bang; + + // After creating the function ZIR instruction, it will need to update the break + // instructions inside the expression blocks for align, addrspace, cc, and ret_ty + // to use the function instruction as the "block" to break from. + + var align_gz = decl_gz.makeSubBlock(params_scope); + defer align_gz.unstack(); + const align_ref: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { + const inst = try expr(&decl_gz, params_scope, coerced_align_ri, fn_proto.ast.align_expr); + if (align_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. + break :inst inst; + } + _ = try align_gz.addBreak(.break_inline, 0, inst); + break :inst inst; + }; + + var addrspace_gz = decl_gz.makeSubBlock(params_scope); + defer addrspace_gz.unstack(); + const addrspace_ref: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: { + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .address_space_type } }, fn_proto.ast.addrspace_expr); + if (addrspace_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. + break :inst inst; + } + _ = try addrspace_gz.addBreak(.break_inline, 0, inst); + break :inst inst; + }; + + var section_gz = decl_gz.makeSubBlock(params_scope); + defer section_gz.unstack(); + const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .const_slice_u8_type } }, fn_proto.ast.section_expr); + if (section_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. + break :inst inst; + } + _ = try section_gz.addBreak(.break_inline, 0, inst); + break :inst inst; + }; + + var cc_gz = decl_gz.makeSubBlock(params_scope); + defer cc_gz.unstack(); + const cc_ref: Zir.Inst.Ref = blk: { + if (fn_proto.ast.callconv_expr != 0) { + if (has_inline_keyword) { + return astgen.failNode( + fn_proto.ast.callconv_expr, + "explicit callconv incompatible with inline keyword", + .{}, + ); + } + const inst = try expr( + &decl_gz, + params_scope, + .{ .rl = .{ .coerced_ty = .calling_convention_type } }, + fn_proto.ast.callconv_expr, + ); + if (cc_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. 
+ break :blk inst; + } + _ = try cc_gz.addBreak(.break_inline, 0, inst); + break :blk inst; + } else if (is_extern) { + // note: https://github.com/ziglang/zig/issues/5269 + break :blk .calling_convention_c; + } else if (has_inline_keyword) { + break :blk .calling_convention_inline; + } else { + break :blk .none; + } + }; + + var ret_gz = decl_gz.makeSubBlock(params_scope); + defer ret_gz.unstack(); + const ret_ref: Zir.Inst.Ref = inst: { + const inst = try expr(&ret_gz, params_scope, coerced_type_ri, fn_proto.ast.return_type); + if (ret_gz.instructionsSlice().len == 0) { + // In this case we will send a len=0 body which can be encoded more efficiently. + break :inst inst; + } + _ = try ret_gz.addBreak(.break_inline, 0, inst); + break :inst inst; + }; + + const func_inst: Zir.Inst.Ref = if (body_node == 0) func: { + if (!is_extern) { + return astgen.failTok(fn_proto.ast.fn_token, "non-extern function has no body", .{}); + } + if (is_inferred_error) { + return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{}); + } + break :func try decl_gz.addFunc(.{ + .src_node = decl_node, + .cc_ref = cc_ref, + .cc_gz = &cc_gz, + .align_ref = align_ref, + .align_gz = &align_gz, + .ret_ref = ret_ref, + .ret_gz = &ret_gz, + .section_ref = section_ref, + .section_gz = §ion_gz, + .addrspace_ref = addrspace_ref, + .addrspace_gz = &addrspace_gz, + .param_block = block_inst, + .body_gz = null, + .lib_name = lib_name, + .is_var_args = is_var_args, + .is_inferred_error = false, + .is_test = false, + .is_extern = true, + .is_noinline = is_noinline, + .noalias_bits = noalias_bits, + }); + } else func: { + // as a scope, fn_gz encloses ret_gz, but for instruction list, fn_gz stacks on ret_gz + fn_gz.instructions_top = ret_gz.instructions.items.len; + + const prev_fn_block = astgen.fn_block; + astgen.fn_block = &fn_gz; + defer astgen.fn_block = prev_fn_block; + + const prev_var_args = astgen.fn_var_args; + astgen.fn_var_args = is_var_args; + defer astgen.fn_var_args = prev_var_args; + + astgen.advanceSourceCursorToNode(body_node); + const lbrace_line = astgen.source_line - decl_gz.decl_line; + const lbrace_column = astgen.source_column; + + _ = try expr(&fn_gz, params_scope, .{ .rl = .none }, body_node); + try checkUsed(gz, &fn_gz.base, params_scope); + + if (!fn_gz.endsWithNoReturn()) { + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.ret, .always); + + // Add implicit return at end of function. + _ = try fn_gz.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node)); + } + + break :func try decl_gz.addFunc(.{ + .src_node = decl_node, + .cc_ref = cc_ref, + .cc_gz = &cc_gz, + .align_ref = align_ref, + .align_gz = &align_gz, + .ret_ref = ret_ref, + .ret_gz = &ret_gz, + .section_ref = section_ref, + .section_gz = §ion_gz, + .addrspace_ref = addrspace_ref, + .addrspace_gz = &addrspace_gz, + .lbrace_line = lbrace_line, + .lbrace_column = lbrace_column, + .param_block = block_inst, + .body_gz = &fn_gz, + .lib_name = lib_name, + .is_var_args = is_var_args, + .is_inferred_error = is_inferred_error, + .is_test = false, + .is_extern = false, + .is_noinline = is_noinline, + .noalias_bits = noalias_bits, + }); + }; + + // We add this at the end so that its instruction index marks the end range + // of the top level declaration. addFunc already unstacked fn_gz and ret_gz. 
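+ // The decl entry recorded below uses the same base layout as every other
+ // member: 4 words of source hash, then line delta, name, block_inst, and
+ // doc comment index.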
+ _ = try decl_gz.addBreak(.break_inline, block_inst, func_inst); + try decl_gz.setBlockBody(block_inst); + + { + const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node)); + const casted = @bitCast([4]u32, contents_hash); + wip_members.appendToDeclSlice(&casted); + } + { + const line_delta = decl_gz.decl_line - gz.decl_line; + wip_members.appendToDecl(line_delta); + } + wip_members.appendToDecl(fn_name_str_index); + wip_members.appendToDecl(block_inst); + wip_members.appendToDecl(doc_comment_index); +} + +fn globalVarDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + node: Ast.Node.Index, + var_decl: Ast.full.VarDecl, +) InnerError!void { + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + const is_mutable = token_tags[var_decl.ast.mut_token] == .keyword_var; + // We do this at the beginning so that the instruction index marks the range start + // of the top level declaration. + const block_inst = try gz.makeBlockInst(.block_inline, node); + + const name_token = var_decl.ast.mut_token + 1; + const name_str_index = try astgen.identAsString(name_token); + astgen.advanceSourceCursorToNode(node); + + var block_scope: GenZir = .{ + .parent = scope, + .decl_node_index = node, + .decl_line = astgen.source_line, + .astgen = astgen, + .force_comptime = true, + .anon_name_strategy = .parent, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + const is_pub = var_decl.visib_token != null; + const is_export = blk: { + const maybe_export_token = var_decl.extern_export_token orelse break :blk false; + break :blk token_tags[maybe_export_token] == .keyword_export; + }; + const is_extern = blk: { + const maybe_extern_token = var_decl.extern_export_token orelse break :blk false; + break :blk token_tags[maybe_extern_token] == .keyword_extern; + }; + const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node == 0) .none else inst: { + break :inst try expr(&block_scope, &block_scope.base, align_ri, var_decl.ast.align_node); + }; + const addrspace_inst: Zir.Inst.Ref = if (var_decl.ast.addrspace_node == 0) .none else inst: { + break :inst try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .address_space_type } }, var_decl.ast.addrspace_node); + }; + const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: { + break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .const_slice_u8_type } }, var_decl.ast.section_node); + }; + const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none; + wip_members.nextDecl(is_pub, is_export, align_inst != .none, has_section_or_addrspace); + + const is_threadlocal = if (var_decl.threadlocal_token) |tok| blk: { + if (!is_mutable) { + return astgen.failTok(tok, "threadlocal variable cannot be constant", .{}); + } + break :blk true; + } else false; + + const lib_name: u32 = if (var_decl.lib_name) |lib_name_token| blk: { + const lib_name_str = try astgen.strLitAsString(lib_name_token); + const lib_name_slice = astgen.string_bytes.items[lib_name_str.index..][0..lib_name_str.len]; + if (mem.indexOfScalar(u8, lib_name_slice, 0) != null) { + return astgen.failTok(lib_name_token, "library name cannot contain null bytes", .{}); + } else if (lib_name_str.len == 0) { + return astgen.failTok(lib_name_token, "library name cannot be empty", .{}); + } + break :blk lib_name_str.index; + } else 0; + + const doc_comment_index = try 
astgen.docCommentAsString(var_decl.firstToken()); + + assert(var_decl.comptime_token == null); // handled by parser + + const var_inst: Zir.Inst.Ref = if (var_decl.ast.init_node != 0) vi: { + if (is_extern) { + return astgen.failNode( + var_decl.ast.init_node, + "extern variables have no initializers", + .{}, + ); + } + + const type_inst: Zir.Inst.Ref = if (var_decl.ast.type_node != 0) + try expr( + &block_scope, + &block_scope.base, + .{ .rl = .{ .ty = .type_type } }, + var_decl.ast.type_node, + ) + else + .none; + + const init_inst = try expr( + &block_scope, + &block_scope.base, + if (type_inst != .none) .{ .rl = .{ .ty = type_inst } } else .{ .rl = .none }, + var_decl.ast.init_node, + ); + + if (is_mutable) { + const var_inst = try block_scope.addVar(.{ + .var_type = type_inst, + .lib_name = 0, + .align_inst = .none, // passed via the decls data + .init = init_inst, + .is_extern = false, + .is_threadlocal = is_threadlocal, + }); + break :vi var_inst; + } else { + break :vi init_inst; + } + } else if (!is_extern) { + return astgen.failNode(node, "variables must be initialized", .{}); + } else if (var_decl.ast.type_node != 0) vi: { + // Extern variable which has an explicit type. + const type_inst = try typeExpr(&block_scope, &block_scope.base, var_decl.ast.type_node); + + const var_inst = try block_scope.addVar(.{ + .var_type = type_inst, + .lib_name = lib_name, + .align_inst = .none, // passed via the decls data + .init = .none, + .is_extern = true, + .is_threadlocal = is_threadlocal, + }); + break :vi var_inst; + } else { + return astgen.failNode(node, "unable to infer variable type", .{}); + }; + // We do this at the end so that the instruction index marks the end + // range of a top level declaration. + _ = try block_scope.addBreakWithSrcNode(.break_inline, block_inst, var_inst, node); + try block_scope.setBlockBody(block_inst); + + { + const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); + const casted = @bitCast([4]u32, contents_hash); + wip_members.appendToDeclSlice(&casted); + } + { + const line_delta = block_scope.decl_line - gz.decl_line; + wip_members.appendToDecl(line_delta); + } + wip_members.appendToDecl(name_str_index); + wip_members.appendToDecl(block_inst); + wip_members.appendToDecl(doc_comment_index); // doc_comment wip + if (align_inst != .none) { + wip_members.appendToDecl(@enumToInt(align_inst)); + } + if (has_section_or_addrspace) { + wip_members.appendToDecl(@enumToInt(section_inst)); + wip_members.appendToDecl(@enumToInt(addrspace_inst)); + } +} + +fn comptimeDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + node: Ast.Node.Index, +) InnerError!void { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].lhs; + + // Up top so the ZIR instruction index marks the start range of this + // top-level declaration. 
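+ // A container-level `comptime { ... }` block is lowered into this inline
+ // block; if control falls through the end, a break with void is appended
+ // below.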
+ const block_inst = try gz.makeBlockInst(.block_inline, node); + wip_members.nextDecl(false, false, false, false); + astgen.advanceSourceCursorToNode(node); + + var decl_block: GenZir = .{ + .force_comptime = true, + .decl_node_index = node, + .decl_line = astgen.source_line, + .parent = scope, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer decl_block.unstack(); + + const block_result = try expr(&decl_block, &decl_block.base, .{ .rl = .none }, body_node); + if (decl_block.isEmpty() or !decl_block.refIsNoReturn(block_result)) { + _ = try decl_block.addBreak(.break_inline, block_inst, .void_value); + } + try decl_block.setBlockBody(block_inst); + + { + const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); + const casted = @bitCast([4]u32, contents_hash); + wip_members.appendToDeclSlice(&casted); + } + { + const line_delta = decl_block.decl_line - gz.decl_line; + wip_members.appendToDecl(line_delta); + } + wip_members.appendToDecl(0); + wip_members.appendToDecl(block_inst); + wip_members.appendToDecl(0); // no doc comments on comptime decls +} + +fn usingnamespaceDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + node: Ast.Node.Index, +) InnerError!void { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const type_expr = node_datas[node].lhs; + const is_pub = blk: { + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + const main_token = main_tokens[node]; + break :blk (main_token > 0 and token_tags[main_token - 1] == .keyword_pub); + }; + // Up top so the ZIR instruction index marks the start range of this + // top-level declaration. + const block_inst = try gz.makeBlockInst(.block_inline, node); + wip_members.nextDecl(is_pub, true, false, false); + astgen.advanceSourceCursorToNode(node); + + var decl_block: GenZir = .{ + .force_comptime = true, + .decl_node_index = node, + .decl_line = astgen.source_line, + .parent = scope, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer decl_block.unstack(); + + const namespace_inst = try typeExpr(&decl_block, &decl_block.base, type_expr); + _ = try decl_block.addBreak(.break_inline, block_inst, namespace_inst); + try decl_block.setBlockBody(block_inst); + + { + const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); + const casted = @bitCast([4]u32, contents_hash); + wip_members.appendToDeclSlice(&casted); + } + { + const line_delta = decl_block.decl_line - gz.decl_line; + wip_members.appendToDecl(line_delta); + } + wip_members.appendToDecl(0); + wip_members.appendToDecl(block_inst); + wip_members.appendToDecl(0); // no doc comments on usingnamespace decls +} + +fn testDecl( + astgen: *AstGen, + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + node: Ast.Node.Index, +) InnerError!void { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const body_node = node_datas[node].rhs; + + // Up top so the ZIR instruction index marks the start range of this + // top-level declaration. 
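+ // Covers all three forms: `test { ... }` (unnamed, encoded as name index 1),
+ // `test "description" { ... }`, and decltests such as `test foo { ... }`,
+ // where `foo` must resolve to a container declaration.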
+ const block_inst = try gz.makeBlockInst(.block_inline, node); + + wip_members.nextDecl(false, false, false, false); + astgen.advanceSourceCursorToNode(node); + + var decl_block: GenZir = .{ + .force_comptime = true, + .decl_node_index = node, + .decl_line = astgen.source_line, + .parent = scope, + .astgen = astgen, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer decl_block.unstack(); + + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + const test_token = main_tokens[node]; + const test_name_token = test_token + 1; + const test_name_token_tag = token_tags[test_name_token]; + const is_decltest = test_name_token_tag == .identifier; + const test_name: u32 = blk: { + if (test_name_token_tag == .string_literal) { + break :blk try astgen.testNameString(test_name_token); + } else if (test_name_token_tag == .identifier) { + const ident_name_raw = tree.tokenSlice(test_name_token); + + if (mem.eql(u8, ident_name_raw, "_")) return astgen.failTok(test_name_token, "'_' used as an identifier without @\"_\" syntax", .{}); + + // if not @"" syntax, just use raw token slice + if (ident_name_raw[0] != '@') { + if (isPrimitive(ident_name_raw)) return astgen.failTok(test_name_token, "cannot test a primitive", .{}); + } + + // Local variables, including function parameters. + const name_str_index = try astgen.identAsString(test_name_token); + var s = scope; + var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already + var num_namespaces_out: u32 = 0; + var capturing_namespace: ?*Scope.Namespace = null; + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + if (local_val.name == name_str_index) { + local_val.used = test_name_token; + return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{ + @tagName(local_val.id_cat), + }, &[_]u32{ + try astgen.errNoteTok(local_val.token_src, "{s} declared here", .{ + @tagName(local_val.id_cat), + }), + }); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == name_str_index) { + local_ptr.used = test_name_token; + return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{ + @tagName(local_ptr.id_cat), + }, &[_]u32{ + try astgen.errNoteTok(local_ptr.token_src, "{s} declared here", .{ + @tagName(local_ptr.id_cat), + }), + }); + } + s = local_ptr.parent; + }, + .gen_zir => s = s.cast(GenZir).?.parent, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => { + const ns = s.cast(Scope.Namespace).?; + if (ns.decls.get(name_str_index)) |i| { + if (found_already) |f| { + return astgen.failTokNotes(test_name_token, "ambiguous reference", .{}, &.{ + try astgen.errNoteNode(f, "declared here", .{}), + try astgen.errNoteNode(i, "also declared here", .{}), + }); + } + // We found a match but must continue looking for ambiguous references to decls. + found_already = i; + } + num_namespaces_out += 1; + capturing_namespace = ns; + s = ns.parent; + }, + .top => break, + }; + if (found_already == null) { + const ident_name = try astgen.identifierTokenString(test_name_token); + return astgen.failTok(test_name_token, "use of undeclared identifier '{s}'", .{ident_name}); + } + + break :blk name_str_index; + } + // String table index 1 has a special meaning here of test decl with no name. 
+ break :blk 1; + }; + + var fn_block: GenZir = .{ + .force_comptime = false, + .decl_node_index = node, + .decl_line = decl_block.decl_line, + .parent = &decl_block.base, + .astgen = astgen, + .instructions = decl_block.instructions, + .instructions_top = decl_block.instructions.items.len, + }; + defer fn_block.unstack(); + + const prev_fn_block = astgen.fn_block; + astgen.fn_block = &fn_block; + defer astgen.fn_block = prev_fn_block; + + astgen.advanceSourceCursorToNode(body_node); + const lbrace_line = astgen.source_line - decl_block.decl_line; + const lbrace_column = astgen.source_column; + + const block_result = try expr(&fn_block, &fn_block.base, .{ .rl = .none }, body_node); + if (fn_block.isEmpty() or !fn_block.refIsNoReturn(block_result)) { + + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.ret, .always); + + // Add implicit return at end of function. + _ = try fn_block.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node)); + } + + const func_inst = try decl_block.addFunc(.{ + .src_node = node, + + .cc_ref = .none, + .cc_gz = null, + .align_ref = .none, + .align_gz = null, + .ret_ref = .void_type, + .ret_gz = null, + .section_ref = .none, + .section_gz = null, + .addrspace_ref = .none, + .addrspace_gz = null, + + .lbrace_line = lbrace_line, + .lbrace_column = lbrace_column, + .param_block = block_inst, + .body_gz = &fn_block, + .lib_name = 0, + .is_var_args = false, + .is_inferred_error = true, + .is_test = true, + .is_extern = false, + .is_noinline = false, + .noalias_bits = 0, + }); + + _ = try decl_block.addBreak(.break_inline, block_inst, func_inst); + try decl_block.setBlockBody(block_inst); + + { + const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); + const casted = @bitCast([4]u32, contents_hash); + wip_members.appendToDeclSlice(&casted); + } + { + const line_delta = decl_block.decl_line - gz.decl_line; + wip_members.appendToDecl(line_delta); + } + if (is_decltest) + wip_members.appendToDecl(2) // 2 here means that it is a decltest, look at doc comment for name + else + wip_members.appendToDecl(test_name); + wip_members.appendToDecl(block_inst); + if (is_decltest) + wip_members.appendToDecl(test_name) // the doc comment on a decltest represents it's name + else + wip_members.appendToDecl(0); // no doc comments on test decls +} + +fn structDeclInner( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + container_decl: Ast.full.ContainerDecl, + layout: std.builtin.Type.ContainerLayout, + backing_int_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const decl_inst = try gz.reserveInstructionIndex(); + + if (container_decl.ast.members.len == 0 and backing_int_node == 0) { + try gz.setStruct(decl_inst, .{ + .src_node = node, + .layout = layout, + .fields_len = 0, + .decls_len = 0, + .backing_int_ref = .none, + .backing_int_body_len = 0, + .known_non_opv = false, + .known_comptime_only = false, + .is_tuple = false, + }); + return indexToRef(decl_inst); + } + + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + + var namespace: Scope.Namespace = .{ + .parent = scope, + .node = node, + .inst = decl_inst, + .declaring_gz = gz, + }; + defer namespace.deinit(gpa); + + // The struct_decl instruction introduces a scope in which the decls of the struct + // are in scope, so that field types, alignments, and default value expressions + // can refer to decls within the struct itself. 
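+ // For example, in `const S = struct { const N = 4; data: [N]u8 align(N) = [_]u8{0} ** N, };`
+ // the field type, alignment, and default value all refer to the sibling decl `N`.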
+ astgen.advanceSourceCursorToNode(node); + var block_scope: GenZir = .{ + .parent = &namespace.base, + .decl_node_index = node, + .decl_line = gz.decl_line, + .astgen = astgen, + .force_comptime = true, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + const scratch_top = astgen.scratch.items.len; + defer astgen.scratch.items.len = scratch_top; + + var backing_int_body_len: usize = 0; + const backing_int_ref: Zir.Inst.Ref = blk: { + if (backing_int_node != 0) { + if (layout != .Packed) { + return astgen.failNode(backing_int_node, "non-packed struct does not support backing integer type", .{}); + } else { + const backing_int_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node); + if (!block_scope.isEmpty()) { + if (!block_scope.endsWithNoReturn()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, backing_int_ref); + } + + const body = block_scope.instructionsSlice(); + const old_scratch_len = astgen.scratch.items.len; + try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); + appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); + backing_int_body_len = astgen.scratch.items.len - old_scratch_len; + block_scope.instructions.items.len = block_scope.instructions_top; + } + break :blk backing_int_ref; + } + } else { + break :blk .none; + } + }; + + const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members); + const field_count = @intCast(u32, container_decl.ast.members.len - decl_count); + + const bits_per_field = 4; + const max_field_size = 5; + var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size); + defer wip_members.deinit(); + + // We will use the scratch buffer, starting here, for the bodies: + // bodies: { // for every fields_len + // field_type_body_inst: Inst, // for each field_type_body_len + // align_body_inst: Inst, // for each align_body_len + // init_body_inst: Inst, // for each init_body_len + // } + // Note that the scratch buffer is simultaneously being used by WipMembers, however + // it will not access any elements beyond this point in the ArrayList. It also + // accesses via the ArrayList items field so it can handle the scratch buffer being + // reallocated. + // No defer needed here because it is handled by `wip_members.deinit()` above. 
+ const bodies_start = astgen.scratch.items.len; + + var is_tuple = false; + const node_tags = tree.nodes.items(.tag); + for (container_decl.ast.members) |member_node| { + const container_field = tree.fullContainerField(member_node) orelse continue; + is_tuple = container_field.ast.tuple_like; + if (is_tuple) break; + } + if (is_tuple) for (container_decl.ast.members) |member_node| { + switch (node_tags[member_node]) { + .container_field_init, + .container_field_align, + .container_field, + .@"comptime", + .test_decl, + => continue, + else => { + const tuple_member = for (container_decl.ast.members) |maybe_tuple| switch (node_tags[maybe_tuple]) { + .container_field_init, + .container_field_align, + .container_field, + => break maybe_tuple, + else => {}, + } else unreachable; + return astgen.failNodeNotes( + member_node, + "tuple declarations cannot contain declarations", + .{}, + &[_]u32{ + try astgen.errNoteNode(tuple_member, "tuple field here", .{}), + }, + ); + }, + } + }; + + var known_non_opv = false; + var known_comptime_only = false; + for (container_decl.ast.members) |member_node| { + var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { + .decl => continue, + .field => |field| field, + }; + + if (!is_tuple) { + member.convertToNonTupleLike(astgen.tree.nodes); + assert(!member.ast.tuple_like); + + const field_name = try astgen.identAsString(member.ast.main_token); + wip_members.appendToField(field_name); + } else if (!member.ast.tuple_like) { + return astgen.failTok(member.ast.main_token, "tuple field has a name", .{}); + } + + const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); + wip_members.appendToField(doc_comment_index); + + if (member.ast.type_expr == 0) { + return astgen.failTok(member.ast.main_token, "struct field missing type", .{}); + } + + const field_type = try typeExpr(&block_scope, &namespace.base, member.ast.type_expr); + const have_type_body = !block_scope.isEmpty(); + const have_align = member.ast.align_expr != 0; + const have_value = member.ast.value_expr != 0; + const is_comptime = member.comptime_token != null; + + if (is_comptime and layout == .Packed) { + return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be marked comptime", .{}); + } else if (is_comptime and layout == .Extern) { + return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{}); + } + + if (!is_comptime) { + known_non_opv = known_non_opv or + nodeImpliesMoreThanOnePossibleValue(tree, member.ast.type_expr); + known_comptime_only = known_comptime_only or + nodeImpliesComptimeOnly(tree, member.ast.type_expr); + } + wip_members.nextField(bits_per_field, .{ have_align, have_value, is_comptime, have_type_body }); + + if (have_type_body) { + if (!block_scope.endsWithNoReturn()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, field_type); + } + const body = block_scope.instructionsSlice(); + const old_scratch_len = astgen.scratch.items.len; + try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); + appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); + wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len)); + block_scope.instructions.items.len = block_scope.instructions_top; + } else { + wip_members.appendToField(@enumToInt(field_type)); + } + + if (have_align) { + if (layout == .Packed) { + try astgen.appendErrorNode(member.ast.align_expr, "unable to override alignment of packed struct 
fields", .{}); + } + const align_ref = try expr(&block_scope, &namespace.base, coerced_align_ri, member.ast.align_expr); + if (!block_scope.endsWithNoReturn()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, align_ref); + } + const body = block_scope.instructionsSlice(); + const old_scratch_len = astgen.scratch.items.len; + try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); + appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); + wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len)); + block_scope.instructions.items.len = block_scope.instructions_top; + } + + if (have_value) { + const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = field_type } }; + + const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr); + if (!block_scope.endsWithNoReturn()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, default_inst); + } + const body = block_scope.instructionsSlice(); + const old_scratch_len = astgen.scratch.items.len; + try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body)); + appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); + wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len)); + block_scope.instructions.items.len = block_scope.instructions_top; + } else if (member.comptime_token) |comptime_token| { + return astgen.failTok(comptime_token, "comptime field without default initialization value", .{}); + } + } + + try gz.setStruct(decl_inst, .{ + .src_node = node, + .layout = layout, + .fields_len = field_count, + .decls_len = decl_count, + .backing_int_ref = backing_int_ref, + .backing_int_body_len = @intCast(u32, backing_int_body_len), + .known_non_opv = known_non_opv, + .known_comptime_only = known_comptime_only, + .is_tuple = is_tuple, + }); + + wip_members.finishBits(bits_per_field); + const decls_slice = wip_members.declsSlice(); + const fields_slice = wip_members.fieldsSlice(); + const bodies_slice = astgen.scratch.items[bodies_start..]; + try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len + + decls_slice.len + fields_slice.len + bodies_slice.len); + astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]); + astgen.extra.appendSliceAssumeCapacity(decls_slice); + astgen.extra.appendSliceAssumeCapacity(fields_slice); + astgen.extra.appendSliceAssumeCapacity(bodies_slice); + + block_scope.unstack(); + try gz.addNamespaceCaptures(&namespace); + return indexToRef(decl_inst); +} + +fn unionDeclInner( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + members: []const Ast.Node.Index, + layout: std.builtin.Type.ContainerLayout, + arg_node: Ast.Node.Index, + auto_enum_tok: ?Ast.TokenIndex, +) InnerError!Zir.Inst.Ref { + const decl_inst = try gz.reserveInstructionIndex(); + + const astgen = gz.astgen; + const gpa = astgen.gpa; + + var namespace: Scope.Namespace = .{ + .parent = scope, + .node = node, + .inst = decl_inst, + .declaring_gz = gz, + }; + defer namespace.deinit(gpa); + + // The union_decl instruction introduces a scope in which the decls of the union + // are in scope, so that field types, alignments, and default value expressions + // can refer to decls within the union itself. 
+ astgen.advanceSourceCursorToNode(node); + var block_scope: GenZir = .{ + .parent = &namespace.base, + .decl_node_index = node, + .decl_line = gz.decl_line, + .astgen = astgen, + .force_comptime = true, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + const decl_count = try astgen.scanDecls(&namespace, members); + const field_count = @intCast(u32, members.len - decl_count); + + if (layout != .Auto and (auto_enum_tok != null or arg_node != 0)) { + const layout_str = if (layout == .Extern) "extern" else "packed"; + if (arg_node != 0) { + return astgen.failNode(arg_node, "{s} union does not support enum tag type", .{layout_str}); + } else { + return astgen.failTok(auto_enum_tok.?, "{s} union does not support enum tag type", .{layout_str}); + } + } + + const arg_inst: Zir.Inst.Ref = if (arg_node != 0) + try typeExpr(&block_scope, &namespace.base, arg_node) + else + .none; + + const bits_per_field = 4; + const max_field_size = 5; + var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size); + defer wip_members.deinit(); + + for (members) |member_node| { + var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { + .decl => continue, + .field => |field| field, + }; + member.convertToNonTupleLike(astgen.tree.nodes); + if (member.ast.tuple_like) { + return astgen.failTok(member.ast.main_token, "union field missing name", .{}); + } + if (member.comptime_token) |comptime_token| { + return astgen.failTok(comptime_token, "union fields cannot be marked comptime", .{}); + } + + const field_name = try astgen.identAsString(member.ast.main_token); + wip_members.appendToField(field_name); + + const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); + wip_members.appendToField(doc_comment_index); + + const have_type = member.ast.type_expr != 0; + const have_align = member.ast.align_expr != 0; + const have_value = member.ast.value_expr != 0; + const unused = false; + wip_members.nextField(bits_per_field, .{ have_type, have_align, have_value, unused }); + + if (have_type) { + const field_type = try typeExpr(&block_scope, &namespace.base, member.ast.type_expr); + wip_members.appendToField(@enumToInt(field_type)); + } else if (arg_inst == .none and auto_enum_tok == null) { + return astgen.failNode(member_node, "union field missing type", .{}); + } + if (have_align) { + const align_inst = try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .u32_type } }, member.ast.align_expr); + wip_members.appendToField(@enumToInt(align_inst)); + } + if (have_value) { + if (arg_inst == .none) { + return astgen.failNodeNotes( + node, + "explicitly valued tagged union missing integer tag type", + .{}, + &[_]u32{ + try astgen.errNoteNode( + member.ast.value_expr, + "tag value specified here", + .{}, + ), + }, + ); + } + if (auto_enum_tok == null) { + return astgen.failNodeNotes( + node, + "explicitly valued tagged union requires inferred enum tag type", + .{}, + &[_]u32{ + try astgen.errNoteNode( + member.ast.value_expr, + "tag value specified here", + .{}, + ), + }, + ); + } + const tag_value = try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr); + wip_members.appendToField(@enumToInt(tag_value)); + } + } + + if (!block_scope.isEmpty()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); + } + + const body = block_scope.instructionsSlice(); + const body_len = 
astgen.countBodyLenAfterFixups(body); + + try gz.setUnion(decl_inst, .{ + .src_node = node, + .layout = layout, + .tag_type = arg_inst, + .body_len = body_len, + .fields_len = field_count, + .decls_len = decl_count, + .auto_enum_tag = auto_enum_tok != null, + }); + + wip_members.finishBits(bits_per_field); + const decls_slice = wip_members.declsSlice(); + const fields_slice = wip_members.fieldsSlice(); + try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len); + astgen.extra.appendSliceAssumeCapacity(decls_slice); + astgen.appendBodyWithFixups(body); + astgen.extra.appendSliceAssumeCapacity(fields_slice); + + block_scope.unstack(); + try gz.addNamespaceCaptures(&namespace); + return indexToRef(decl_inst); +} + +fn containerDecl( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + container_decl: Ast.full.ContainerDecl, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + const prev_fn_block = astgen.fn_block; + astgen.fn_block = null; + defer astgen.fn_block = prev_fn_block; + + // We must not create any types until Sema. Here the goal is only to generate + // ZIR for all the field types, alignments, and default value expressions. + + switch (token_tags[container_decl.ast.main_token]) { + .keyword_struct => { + const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) { + .keyword_packed => std.builtin.Type.ContainerLayout.Packed, + .keyword_extern => std.builtin.Type.ContainerLayout.Extern, + else => unreachable, + } else std.builtin.Type.ContainerLayout.Auto; + + const result = try structDeclInner(gz, scope, node, container_decl, layout, container_decl.ast.arg); + return rvalue(gz, ri, result, node); + }, + .keyword_union => { + const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) { + .keyword_packed => std.builtin.Type.ContainerLayout.Packed, + .keyword_extern => std.builtin.Type.ContainerLayout.Extern, + else => unreachable, + } else std.builtin.Type.ContainerLayout.Auto; + + const result = try unionDeclInner(gz, scope, node, container_decl.ast.members, layout, container_decl.ast.arg, container_decl.ast.enum_token); + return rvalue(gz, ri, result, node); + }, + .keyword_enum => { + if (container_decl.layout_token) |t| { + return astgen.failTok(t, "enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type", .{}); + } + // Count total fields as well as how many have explicitly provided tag values. 
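+ // For example, `enum(u8) { a, b = 5, _ }` counts total_fields = 2 and
+ // values = 1; the trailing `_` marks the enum non-exhaustive and must be
+ // the last field, with an explicit integer tag type.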
+ const counts = blk: { + var values: usize = 0; + var total_fields: usize = 0; + var decls: usize = 0; + var nonexhaustive_node: Ast.Node.Index = 0; + var nonfinal_nonexhaustive = false; + for (container_decl.ast.members) |member_node| { + var member = tree.fullContainerField(member_node) orelse { + decls += 1; + continue; + }; + member.convertToNonTupleLike(astgen.tree.nodes); + if (member.ast.tuple_like) { + return astgen.failTok(member.ast.main_token, "enum field missing name", .{}); + } + if (member.comptime_token) |comptime_token| { + return astgen.failTok(comptime_token, "enum fields cannot be marked comptime", .{}); + } + if (member.ast.type_expr != 0) { + return astgen.failNodeNotes( + member.ast.type_expr, + "enum fields do not have types", + .{}, + &[_]u32{ + try astgen.errNoteNode( + node, + "consider 'union(enum)' here to make it a tagged union", + .{}, + ), + }, + ); + } + if (member.ast.align_expr != 0) { + return astgen.failNode(member.ast.align_expr, "enum fields cannot be aligned", .{}); + } + + const name_token = member.ast.main_token; + if (mem.eql(u8, tree.tokenSlice(name_token), "_")) { + if (nonexhaustive_node != 0) { + return astgen.failNodeNotes( + member_node, + "redundant non-exhaustive enum mark", + .{}, + &[_]u32{ + try astgen.errNoteNode( + nonexhaustive_node, + "other mark here", + .{}, + ), + }, + ); + } + nonexhaustive_node = member_node; + if (member.ast.value_expr != 0) { + return astgen.failNode(member.ast.value_expr, "'_' is used to mark an enum as non-exhaustive and cannot be assigned a value", .{}); + } + continue; + } else if (nonexhaustive_node != 0) { + nonfinal_nonexhaustive = true; + } + total_fields += 1; + if (member.ast.value_expr != 0) { + if (container_decl.ast.arg == 0) { + return astgen.failNode(member.ast.value_expr, "value assigned to enum tag with inferred tag type", .{}); + } + values += 1; + } + } + if (nonfinal_nonexhaustive) { + return astgen.failNode(nonexhaustive_node, "'_' field of non-exhaustive enum must be last", .{}); + } + break :blk .{ + .total_fields = total_fields, + .values = values, + .decls = decls, + .nonexhaustive_node = nonexhaustive_node, + }; + }; + if (counts.nonexhaustive_node != 0 and container_decl.ast.arg == 0) { + try astgen.appendErrorNodeNotes( + node, + "non-exhaustive enum missing integer tag type", + .{}, + &[_]u32{ + try astgen.errNoteNode( + counts.nonexhaustive_node, + "marked non-exhaustive here", + .{}, + ), + }, + ); + } + // In this case we must generate ZIR code for the tag values, similar to + // how structs are handled above. + const nonexhaustive = counts.nonexhaustive_node != 0; + + const decl_inst = try gz.reserveInstructionIndex(); + + var namespace: Scope.Namespace = .{ + .parent = scope, + .node = node, + .inst = decl_inst, + .declaring_gz = gz, + }; + defer namespace.deinit(gpa); + + // The enum_decl instruction introduces a scope in which the decls of the enum + // are in scope, so that tag values can refer to decls within the enum itself. 
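+ // For example, `const E = enum(u8) { const base = 0x10; a = base, b = base + 1 };`
+ // resolves `base` through the enum's own namespace.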
+ astgen.advanceSourceCursorToNode(node); + var block_scope: GenZir = .{ + .parent = &namespace.base, + .decl_node_index = node, + .decl_line = gz.decl_line, + .astgen = astgen, + .force_comptime = true, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + _ = try astgen.scanDecls(&namespace, container_decl.ast.members); + namespace.base.tag = .enum_namespace; + + const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0) + try comptimeExpr(&block_scope, &namespace.base, .{ .rl = .{ .ty = .type_type } }, container_decl.ast.arg) + else + .none; + + const bits_per_field = 1; + const max_field_size = 3; + var wip_members = try WipMembers.init(gpa, &astgen.scratch, @intCast(u32, counts.decls), @intCast(u32, counts.total_fields), bits_per_field, max_field_size); + defer wip_members.deinit(); + + for (container_decl.ast.members) |member_node| { + if (member_node == counts.nonexhaustive_node) + continue; + namespace.base.tag = .namespace; + var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) { + .decl => continue, + .field => |field| field, + }; + member.convertToNonTupleLike(astgen.tree.nodes); + assert(member.comptime_token == null); + assert(member.ast.type_expr == 0); + assert(member.ast.align_expr == 0); + + const field_name = try astgen.identAsString(member.ast.main_token); + wip_members.appendToField(field_name); + + const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); + wip_members.appendToField(doc_comment_index); + + const have_value = member.ast.value_expr != 0; + wip_members.nextField(bits_per_field, .{have_value}); + + if (have_value) { + if (arg_inst == .none) { + return astgen.failNodeNotes( + node, + "explicitly valued enum missing integer tag type", + .{}, + &[_]u32{ + try astgen.errNoteNode( + member.ast.value_expr, + "tag value specified here", + .{}, + ), + }, + ); + } + namespace.base.tag = .enum_namespace; + const tag_value_inst = try expr(&block_scope, &namespace.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr); + wip_members.appendToField(@enumToInt(tag_value_inst)); + } + } + + if (!block_scope.isEmpty()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); + } + + const body = block_scope.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + + try gz.setEnum(decl_inst, .{ + .src_node = node, + .nonexhaustive = nonexhaustive, + .tag_type = arg_inst, + .body_len = body_len, + .fields_len = @intCast(u32, counts.total_fields), + .decls_len = @intCast(u32, counts.decls), + }); + + wip_members.finishBits(bits_per_field); + const decls_slice = wip_members.declsSlice(); + const fields_slice = wip_members.fieldsSlice(); + try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len); + astgen.extra.appendSliceAssumeCapacity(decls_slice); + astgen.appendBodyWithFixups(body); + astgen.extra.appendSliceAssumeCapacity(fields_slice); + + block_scope.unstack(); + try gz.addNamespaceCaptures(&namespace); + return rvalue(gz, ri, indexToRef(decl_inst), node); + }, + .keyword_opaque => { + assert(container_decl.ast.arg == 0); + + const decl_inst = try gz.reserveInstructionIndex(); + + var namespace: Scope.Namespace = .{ + .parent = scope, + .node = node, + .inst = decl_inst, + .declaring_gz = gz, + }; + defer namespace.deinit(gpa); + + astgen.advanceSourceCursorToNode(node); + var block_scope: GenZir = .{ + .parent = &namespace.base, + .decl_node_index = 
node, + .decl_line = gz.decl_line, + .astgen = astgen, + .force_comptime = true, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + defer block_scope.unstack(); + + const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members); + + var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, 0, 0, 0); + defer wip_members.deinit(); + + for (container_decl.ast.members) |member_node| { + const res = try containerMember(&block_scope, &namespace.base, &wip_members, member_node); + if (res == .field) { + return astgen.failNode(member_node, "opaque types cannot have fields", .{}); + } + } + + try gz.setOpaque(decl_inst, .{ + .src_node = node, + .decls_len = decl_count, + }); + + wip_members.finishBits(0); + const decls_slice = wip_members.declsSlice(); + try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len); + astgen.extra.appendSliceAssumeCapacity(decls_slice); + + block_scope.unstack(); + try gz.addNamespaceCaptures(&namespace); + return rvalue(gz, ri, indexToRef(decl_inst), node); + }, + else => unreachable, + } +} + +const ContainerMemberResult = union(enum) { decl, field: Ast.full.ContainerField }; + +fn containerMember( + gz: *GenZir, + scope: *Scope, + wip_members: *WipMembers, + member_node: Ast.Node.Index, +) InnerError!ContainerMemberResult { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + switch (node_tags[member_node]) { + .container_field_init, + .container_field_align, + .container_field, + => return ContainerMemberResult{ .field = tree.fullContainerField(member_node).? }, + + .fn_proto, + .fn_proto_multi, + .fn_proto_one, + .fn_proto_simple, + .fn_decl, + => { + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, member_node).?; + const body = if (node_tags[member_node] == .fn_decl) node_datas[member_node].rhs else 0; + + astgen.fnDecl(gz, scope, wip_members, member_node, body, full) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => { + astgen.globalVarDecl(gz, scope, wip_members, member_node, tree.fullVarDecl(member_node).?) 
catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + + .@"comptime" => { + astgen.comptimeDecl(gz, scope, wip_members, member_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + .@"usingnamespace" => { + astgen.usingnamespaceDecl(gz, scope, wip_members, member_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + .test_decl => { + astgen.testDecl(gz, scope, wip_members, member_node) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => {}, + }; + }, + else => unreachable, + } + return .decl; +} + +fn errorSetDecl(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + const payload_index = try reserveExtra(astgen, @typeInfo(Zir.Inst.ErrorSetDecl).Struct.fields.len); + var fields_len: usize = 0; + { + var idents: std.AutoHashMapUnmanaged(u32, Ast.TokenIndex) = .{}; + defer idents.deinit(gpa); + + const error_token = main_tokens[node]; + var tok_i = error_token + 2; + while (true) : (tok_i += 1) { + switch (token_tags[tok_i]) { + .doc_comment, .comma => {}, + .identifier => { + const str_index = try astgen.identAsString(tok_i); + const gop = try idents.getOrPut(gpa, str_index); + if (gop.found_existing) { + const name = try gpa.dupe(u8, mem.span(astgen.nullTerminatedString(str_index))); + defer gpa.free(name); + return astgen.failTokNotes( + tok_i, + "duplicate error set field '{s}'", + .{name}, + &[_]u32{ + try astgen.errNoteTok( + gop.value_ptr.*, + "previous declaration here", + .{}, + ), + }, + ); + } + gop.value_ptr.* = tok_i; + + try astgen.extra.ensureUnusedCapacity(gpa, 2); + astgen.extra.appendAssumeCapacity(str_index); + const doc_comment_index = try astgen.docCommentAsString(tok_i); + astgen.extra.appendAssumeCapacity(doc_comment_index); + fields_len += 1; + }, + .r_brace => break, + else => unreachable, + } + } + } + + setExtra(astgen, payload_index, Zir.Inst.ErrorSetDecl{ + .fields_len = @intCast(u32, fields_len), + }); + const result = try gz.addPlNodePayloadIndex(.error_set_decl, node, payload_index); + return rvalue(gz, ri, result, node); +} + +fn tryExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + + const fn_block = astgen.fn_block orelse { + return astgen.failNode(node, "'try' outside function scope", .{}); + }; + + if (parent_gz.any_defer_node != 0) { + return astgen.failNodeNotes(node, "'try' not allowed inside defer expression", .{}, &.{ + try astgen.errNoteNode( + parent_gz.any_defer_node, + "defer expression here", + .{}, + ), + }); + } + + // Ensure debug line/column information is emitted for this try expression. + // Then we will save the line/column so that we can emit another one that goes + // "backwards" because we want to evaluate the operand, but then put the debug + // info back at the try keyword for error return tracing. 
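+ // For `const x = try foo();` the operand `foo()` is lowered first (advancing
+ // the source cursor past it); the saved line/column are then re-emitted in
+ // the error branch below so the returned error is attributed to the `try`
+ // keyword itself.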
+ if (!parent_gz.force_comptime) { + try emitDbgNode(parent_gz, node); + } + const try_line = astgen.source_line - parent_gz.decl_line; + const try_column = astgen.source_column; + + const operand_ri: ResultInfo = switch (ri.rl) { + .ref => .{ .rl = .ref, .ctx = .error_handling_expr }, + else => .{ .rl = .none, .ctx = .error_handling_expr }, + }; + // This could be a pointer or value depending on the `ri` parameter. + const operand = try reachableExpr(parent_gz, scope, operand_ri, operand_node, node); + const is_inline = parent_gz.force_comptime; + const is_inline_bit = @as(u2, @boolToInt(is_inline)); + const is_ptr_bit = @as(u2, @boolToInt(operand_ri.rl == .ref)) << 1; + const block_tag: Zir.Inst.Tag = switch (is_inline_bit | is_ptr_bit) { + 0b00 => .@"try", + 0b01 => .@"try", + //0b01 => .try_inline, + 0b10 => .try_ptr, + 0b11 => .try_ptr, + //0b11 => .try_ptr_inline, + }; + const try_inst = try parent_gz.makeBlockInst(block_tag, node); + try parent_gz.instructions.append(astgen.gpa, try_inst); + + var else_scope = parent_gz.makeSubBlock(scope); + defer else_scope.unstack(); + + const err_tag = switch (ri.rl) { + .ref => Zir.Inst.Tag.err_union_code_ptr, + else => Zir.Inst.Tag.err_union_code, + }; + const err_code = try else_scope.addUnNode(err_tag, operand, node); + try genDefers(&else_scope, &fn_block.base, scope, .{ .both = err_code }); + try emitDbgStmt(&else_scope, try_line, try_column); + _ = try else_scope.addUnNode(.ret_node, err_code, node); + + try else_scope.setTryBody(try_inst, operand); + const result = indexToRef(try_inst); + switch (ri.rl) { + .ref => return result, + else => return rvalue(parent_gz, ri, result, node), + } +} + +fn orelseCatchExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs: Ast.Node.Index, + cond_op: Zir.Inst.Tag, + unwrap_op: Zir.Inst.Tag, + unwrap_code_op: Zir.Inst.Tag, + rhs: Ast.Node.Index, + payload_token: ?Ast.TokenIndex, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + + const do_err_trace = astgen.fn_block != null and (cond_op == .is_non_err or cond_op == .is_non_err_ptr); + + var block_scope = parent_gz.makeSubBlock(scope); + block_scope.setBreakResultInfo(ri); + defer block_scope.unstack(); + + const operand_ri: ResultInfo = switch (block_scope.break_result_info.rl) { + .ref => .{ .rl = .ref, .ctx = if (do_err_trace) .error_handling_expr else .none }, + else => .{ .rl = .none, .ctx = if (do_err_trace) .error_handling_expr else .none }, + }; + block_scope.break_count += 1; + // This could be a pointer or value depending on the `operand_ri` parameter. + // We cannot use `block_scope.break_result_info` because that has the bare + // type, whereas this expression has the optional type. Later we make + // up for this fact by calling rvalue on the else branch. 
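+ // For `opt orelse default` or `failing() catch |err| handle(err)`: the operand
+ // keeps its optional/error-union type here, the then-branch unwraps the payload,
+ // and the else branch lowers the right-hand side (capturing the error into a
+ // new local for `catch |err|`).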
+ const operand = try reachableExpr(&block_scope, &block_scope.base, operand_ri, lhs, rhs); + const cond = try block_scope.addUnNode(cond_op, operand, node); + const condbr_tag: Zir.Inst.Tag = if (parent_gz.force_comptime) .condbr_inline else .condbr; + const condbr = try block_scope.addCondBr(condbr_tag, node); + + const block_tag: Zir.Inst.Tag = if (parent_gz.force_comptime) .block_inline else .block; + const block = try parent_gz.makeBlockInst(block_tag, node); + try block_scope.setBlockBody(block); + // block_scope unstacked now, can add new instructions to parent_gz + try parent_gz.instructions.append(astgen.gpa, block); + + var then_scope = block_scope.makeSubBlock(scope); + defer then_scope.unstack(); + + // This could be a pointer or value depending on `unwrap_op`. + const unwrapped_payload = try then_scope.addUnNode(unwrap_op, operand, node); + const then_result = switch (ri.rl) { + .ref => unwrapped_payload, + else => try rvalue(&then_scope, block_scope.break_result_info, unwrapped_payload, node), + }; + + var else_scope = block_scope.makeSubBlock(scope); + defer else_scope.unstack(); + + // We know that the operand (almost certainly) modified the error return trace, + // so signal to Sema that it should save the new index for restoring later. + if (do_err_trace and nodeMayAppendToErrorTrace(tree, lhs)) + _ = try else_scope.addSaveErrRetIndex(.always); + + var err_val_scope: Scope.LocalVal = undefined; + const else_sub_scope = blk: { + const payload = payload_token orelse break :blk &else_scope.base; + const err_str = tree.tokenSlice(payload); + if (mem.eql(u8, err_str, "_")) { + return astgen.failTok(payload, "discard of error capture; omit it instead", .{}); + } + const err_name = try astgen.identAsString(payload); + + try astgen.detectLocalShadowing(scope, err_name, payload, err_str, .capture); + + err_val_scope = .{ + .parent = &else_scope.base, + .gen_zir = &else_scope, + .name = err_name, + .inst = try else_scope.addUnNode(unwrap_code_op, operand, node), + .token_src = payload, + .id_cat = .capture, + }; + break :blk &err_val_scope.base; + }; + + const else_result = try expr(&else_scope, else_sub_scope, block_scope.break_result_info, rhs); + if (!else_scope.endsWithNoReturn()) { + block_scope.break_count += 1; + + // As our last action before the break, "pop" the error trace if needed + if (do_err_trace) + try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, rhs, else_result); + } + try checkUsed(parent_gz, &else_scope.base, else_sub_scope); + + // We hold off on the break instructions as well as copying the then/else + // instructions into place until we know whether to keep store_to_block_ptr + // instructions or not. + + const break_tag: Zir.Inst.Tag = if (parent_gz.force_comptime) .break_inline else .@"break"; + const result = try finishThenElseBlock( + parent_gz, + ri, + node, + &block_scope, + &then_scope, + &else_scope, + condbr, + cond, + then_result, + node, + else_result, + rhs, + block, + block, + break_tag, + ); + return result; +} + +/// Supports `else_scope` stacked on `then_scope` stacked on `block_scope`. Unstacks `else_scope` then `then_scope`. 
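+/// Shared by `orelseCatchExpr`, `ifExpr`, `whileExpr`, and `forExpr`: fills in the
+/// `condbr` payload and decides whether the result is communicated through break
+/// operands or through the block's result location pointer.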
+fn finishThenElseBlock( + parent_gz: *GenZir, + ri: ResultInfo, + node: Ast.Node.Index, + block_scope: *GenZir, + then_scope: *GenZir, + else_scope: *GenZir, + condbr: Zir.Inst.Index, + cond: Zir.Inst.Ref, + then_result: Zir.Inst.Ref, + then_src_node: Ast.Node.Index, + else_result: Zir.Inst.Ref, + else_src_node: Ast.Node.Index, + main_block: Zir.Inst.Index, + then_break_block: Zir.Inst.Index, + break_tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + // We now have enough information to decide whether the result instruction should + // be communicated via result location pointer or break instructions. + const strat = ri.rl.strategy(block_scope); + // else_scope may be stacked on then_scope, so check for no-return on then_scope manually + const tags = parent_gz.astgen.instructions.items(.tag); + const then_slice = then_scope.instructionsSliceUpto(else_scope); + const then_no_return = then_slice.len > 0 and tags[then_slice[then_slice.len - 1]].isNoReturn(); + const else_no_return = else_scope.endsWithNoReturn(); + + switch (strat.tag) { + .break_void => { + const then_break = if (!then_no_return) try then_scope.makeBreak(break_tag, then_break_block, .void_value) else 0; + const else_break = if (!else_no_return) try else_scope.makeBreak(break_tag, main_block, .void_value) else 0; + assert(!strat.elide_store_to_block_ptr_instructions); + try setCondBrPayload(condbr, cond, then_scope, then_break, else_scope, else_break); + return indexToRef(main_block); + }, + .break_operand => { + const then_break = if (!then_no_return) try then_scope.makeBreakWithSrcNode(break_tag, then_break_block, then_result, then_src_node) else 0; + const else_break = if (else_result == .none) + try else_scope.makeBreak(break_tag, main_block, .void_value) + else if (!else_no_return) + try else_scope.makeBreakWithSrcNode(break_tag, main_block, else_result, else_src_node) + else + 0; + + if (strat.elide_store_to_block_ptr_instructions) { + try setCondBrPayloadElideBlockStorePtr(condbr, cond, then_scope, then_break, else_scope, else_break, block_scope.rl_ptr); + } else { + try setCondBrPayload(condbr, cond, then_scope, then_break, else_scope, else_break); + } + const block_ref = indexToRef(main_block); + switch (ri.rl) { + .ref => return block_ref, + else => return rvalue(parent_gz, ri, block_ref, node), + } + }, + } +} + +/// Return whether the identifier names of two tokens are equal. Resolves @"" +/// tokens without allocating. +/// OK in theory it could do it without allocating. This implementation +/// allocates when the @"" form is used. 
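+/// For example, `foo` and `@"foo"` are treated as the same identifier name.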
+fn tokenIdentEql(astgen: *AstGen, token1: Ast.TokenIndex, token2: Ast.TokenIndex) !bool { + const ident_name_1 = try astgen.identifierTokenString(token1); + const ident_name_2 = try astgen.identifierTokenString(token2); + return mem.eql(u8, ident_name_1, ident_name_2); +} + +fn fieldAccess( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + switch (ri.rl) { + .ref => return addFieldAccess(.field_ptr, gz, scope, .{ .rl = .ref }, node), + else => { + const access = try addFieldAccess(.field_val, gz, scope, .{ .rl = .none }, node); + return rvalue(gz, ri, access, node); + }, + } +} + +fn addFieldAccess( + tag: Zir.Inst.Tag, + gz: *GenZir, + scope: *Scope, + lhs_ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const node_datas = tree.nodes.items(.data); + + const object_node = node_datas[node].lhs; + const dot_token = main_tokens[node]; + const field_ident = dot_token + 1; + const str_index = try astgen.identAsString(field_ident); + const lhs = try expr(gz, scope, lhs_ri, object_node); + + maybeAdvanceSourceCursorToMainToken(gz, node); + const line = gz.astgen.source_line - gz.decl_line; + const column = gz.astgen.source_column; + try emitDbgStmt(gz, line, column); + + return gz.addPlNode(tag, node, Zir.Inst.Field{ + .lhs = lhs, + .field_name_start = str_index, + }); +} + +fn arrayAccess( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const tree = gz.astgen.tree; + const node_datas = tree.nodes.items(.data); + switch (ri.rl) { + .ref => { + const lhs = try expr(gz, scope, .{ .rl = .ref }, node_datas[node].lhs); + + maybeAdvanceSourceCursorToMainToken(gz, node); + const line = gz.astgen.source_line - gz.decl_line; + const column = gz.astgen.source_column; + + const rhs = try expr(gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_datas[node].rhs); + try emitDbgStmt(gz, line, column); + + return gz.addPlNode(.elem_ptr_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }); + }, + else => { + const lhs = try expr(gz, scope, .{ .rl = .none }, node_datas[node].lhs); + + maybeAdvanceSourceCursorToMainToken(gz, node); + const line = gz.astgen.source_line - gz.decl_line; + const column = gz.astgen.source_column; + + const rhs = try expr(gz, scope, .{ .rl = .{ .ty = .usize_type } }, node_datas[node].rhs); + try emitDbgStmt(gz, line, column); + + return rvalue(gz, ri, try gz.addPlNode(.elem_val_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }), node); + }, + } +} + +fn simpleBinOp( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + op_inst_tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + if (op_inst_tag == .cmp_neq or op_inst_tag == .cmp_eq) { + const node_tags = tree.nodes.items(.tag); + const str = if (op_inst_tag == .cmp_eq) "==" else "!="; + if (node_tags[node_datas[node].lhs] == .string_literal or + node_tags[node_datas[node].rhs] == .string_literal) + return astgen.failNode(node, "cannot compare strings with {s}", .{str}); + } + + const lhs = try reachableExpr(gz, scope, .{ .rl = .none }, node_datas[node].lhs, node); + var line: u32 = undefined; + var column: u32 = undefined; + switch (op_inst_tag) { + .add, .sub, .mul, .div, .mod_rem => { + maybeAdvanceSourceCursorToMainToken(gz, node); + line = gz.astgen.source_line - 
gz.decl_line; + column = gz.astgen.source_column; + }, + else => {}, + } + const rhs = try reachableExpr(gz, scope, .{ .rl = .none }, node_datas[node].rhs, node); + + switch (op_inst_tag) { + .add, .sub, .mul, .div, .mod_rem => { + try emitDbgStmt(gz, line, column); + }, + else => {}, + } + const result = try gz.addPlNode(op_inst_tag, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }); + return rvalue(gz, ri, result, node); +} + +fn simpleStrTok( + gz: *GenZir, + ri: ResultInfo, + ident_token: Ast.TokenIndex, + node: Ast.Node.Index, + op_inst_tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const str_index = try astgen.identAsString(ident_token); + const result = try gz.addStrTok(op_inst_tag, str_index, ident_token); + return rvalue(gz, ri, result, node); +} + +fn boolBinOp( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + zir_tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs = try expr(gz, scope, bool_ri, node_datas[node].lhs); + const bool_br = try gz.addBoolBr(zir_tag, lhs); + + var rhs_scope = gz.makeSubBlock(scope); + defer rhs_scope.unstack(); + const rhs = try expr(&rhs_scope, &rhs_scope.base, bool_ri, node_datas[node].rhs); + if (!gz.refIsNoReturn(rhs)) { + _ = try rhs_scope.addBreakWithSrcNode(.break_inline, bool_br, rhs, node_datas[node].rhs); + } + try rhs_scope.setBoolBrBody(bool_br); + + const block_ref = indexToRef(bool_br); + return rvalue(gz, ri, block_ref, node); +} + +fn ifExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + if_full: Ast.full.If, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + const do_err_trace = astgen.fn_block != null and if_full.error_token != null; + + var block_scope = parent_gz.makeSubBlock(scope); + block_scope.setBreakResultInfo(ri); + defer block_scope.unstack(); + + const payload_is_ref = if (if_full.payload_token) |payload_token| + token_tags[payload_token] == .asterisk + else + false; + + try emitDbgNode(parent_gz, if_full.ast.cond_expr); + const cond: struct { + inst: Zir.Inst.Ref, + bool_bit: Zir.Inst.Ref, + } = c: { + if (if_full.error_token) |_| { + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none, .ctx = .error_handling_expr }; + const err_union = try expr(&block_scope, &block_scope.base, cond_ri, if_full.ast.cond_expr); + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; + break :c .{ + .inst = err_union, + .bool_bit = try block_scope.addUnNode(tag, err_union, if_full.ast.cond_expr), + }; + } else if (if_full.payload_token) |_| { + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const optional = try expr(&block_scope, &block_scope.base, cond_ri, if_full.ast.cond_expr); + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; + break :c .{ + .inst = optional, + .bool_bit = try block_scope.addUnNode(tag, optional, if_full.ast.cond_expr), + }; + } else { + const cond = try expr(&block_scope, &block_scope.base, bool_ri, if_full.ast.cond_expr); + break :c .{ + .inst = cond, + .bool_bit = cond, + }; + } + }; + + const condbr_tag: Zir.Inst.Tag = if (parent_gz.force_comptime) .condbr_inline else .condbr; + const condbr = try block_scope.addCondBr(condbr_tag, node); + + const block_tag: Zir.Inst.Tag = if (parent_gz.force_comptime) .block_inline 
else .block; + const block = try parent_gz.makeBlockInst(block_tag, node); + try block_scope.setBlockBody(block); + // block_scope unstacked now, can add new instructions to parent_gz + try parent_gz.instructions.append(astgen.gpa, block); + + var then_scope = parent_gz.makeSubBlock(scope); + defer then_scope.unstack(); + + var payload_val_scope: Scope.LocalVal = undefined; + + try then_scope.addDbgBlockBegin(); + const then_node = if_full.ast.then_expr; + const then_sub_scope = s: { + if (if_full.error_token != null) { + if (if_full.payload_token) |payload_token| { + const tag: Zir.Inst.Tag = if (payload_is_ref) + .err_union_payload_unsafe_ptr + else + .err_union_payload_unsafe; + const payload_inst = try then_scope.addUnNode(tag, cond.inst, then_node); + const token_name_index = payload_token + @boolToInt(payload_is_ref); + const ident_name = try astgen.identAsString(token_name_index); + const token_name_str = tree.tokenSlice(token_name_index); + if (mem.eql(u8, "_", token_name_str)) + break :s &then_scope.base; + try astgen.detectLocalShadowing(&then_scope.base, ident_name, token_name_index, token_name_str, .capture); + payload_val_scope = .{ + .parent = &then_scope.base, + .gen_zir = &then_scope, + .name = ident_name, + .inst = payload_inst, + .token_src = payload_token, + .id_cat = .capture, + }; + try then_scope.addDbgVar(.dbg_var_val, ident_name, payload_inst); + break :s &payload_val_scope.base; + } else { + _ = try then_scope.addUnNode(.ensure_err_union_payload_void, cond.inst, node); + break :s &then_scope.base; + } + } else if (if_full.payload_token) |payload_token| { + const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; + const tag: Zir.Inst.Tag = if (payload_is_ref) + .optional_payload_unsafe_ptr + else + .optional_payload_unsafe; + const ident_bytes = tree.tokenSlice(ident_token); + if (mem.eql(u8, "_", ident_bytes)) + break :s &then_scope.base; + const payload_inst = try then_scope.addUnNode(tag, cond.inst, then_node); + const ident_name = try astgen.identAsString(ident_token); + try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); + payload_val_scope = .{ + .parent = &then_scope.base, + .gen_zir = &then_scope, + .name = ident_name, + .inst = payload_inst, + .token_src = ident_token, + .id_cat = .capture, + }; + try then_scope.addDbgVar(.dbg_var_val, ident_name, payload_inst); + break :s &payload_val_scope.base; + } else { + break :s &then_scope.base; + } + }; + + const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_info, then_node); + if (!then_scope.endsWithNoReturn()) { + block_scope.break_count += 1; + } + try checkUsed(parent_gz, &then_scope.base, then_sub_scope); + try then_scope.addDbgBlockEnd(); + // We hold off on the break instructions as well as copying the then/else + // instructions into place until we know whether to keep store_to_block_ptr + // instructions or not. + + var else_scope = parent_gz.makeSubBlock(scope); + defer else_scope.unstack(); + + // We know that the operand (almost certainly) modified the error return trace, + // so signal to Sema that it should save the new index for restoring later. 
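+    // e.g. for `if (foo()) |v| ... else |err| ...`, entries appended to the trace by
+    // `foo()` can be popped again once the error has been handled in the else branch.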
+ if (do_err_trace and nodeMayAppendToErrorTrace(tree, if_full.ast.cond_expr)) + _ = try else_scope.addSaveErrRetIndex(.always); + + const else_node = if_full.ast.else_expr; + const else_info: struct { + src: Ast.Node.Index, + result: Zir.Inst.Ref, + } = if (else_node != 0) blk: { + try else_scope.addDbgBlockBegin(); + const sub_scope = s: { + if (if_full.error_token) |error_token| { + const tag: Zir.Inst.Tag = if (payload_is_ref) + .err_union_code_ptr + else + .err_union_code; + const payload_inst = try else_scope.addUnNode(tag, cond.inst, if_full.ast.cond_expr); + const ident_name = try astgen.identAsString(error_token); + const error_token_str = tree.tokenSlice(error_token); + if (mem.eql(u8, "_", error_token_str)) + break :s &else_scope.base; + try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, error_token_str, .capture); + payload_val_scope = .{ + .parent = &else_scope.base, + .gen_zir = &else_scope, + .name = ident_name, + .inst = payload_inst, + .token_src = error_token, + .id_cat = .capture, + }; + try else_scope.addDbgVar(.dbg_var_val, ident_name, payload_inst); + break :s &payload_val_scope.base; + } else { + break :s &else_scope.base; + } + }; + const e = try expr(&else_scope, sub_scope, block_scope.break_result_info, else_node); + if (!else_scope.endsWithNoReturn()) { + block_scope.break_count += 1; + + // As our last action before the break, "pop" the error trace if needed + if (do_err_trace) + try restoreErrRetIndex(&else_scope, .{ .block = block }, block_scope.break_result_info, else_node, e); + } + try checkUsed(parent_gz, &else_scope.base, sub_scope); + try else_scope.addDbgBlockEnd(); + break :blk .{ + .src = else_node, + .result = e, + }; + } else .{ + .src = then_node, + .result = switch (ri.rl) { + // Explicitly store void to ptr result loc if there is no else branch + .ptr, .block_ptr => try rvalue(&else_scope, ri, .void_value, node), + else => .none, + }, + }; + + const break_tag: Zir.Inst.Tag = if (parent_gz.force_comptime) .break_inline else .@"break"; + const result = try finishThenElseBlock( + parent_gz, + ri, + node, + &block_scope, + &then_scope, + &else_scope, + condbr, + cond.bool_bit, + then_result, + then_node, + else_info.result, + else_info.src, + block, + block, + break_tag, + ); + return result; +} + +/// Supports `else_scope` stacked on `then_scope`. Unstacks `else_scope` then `then_scope`. 
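+/// Writes the `Zir.Inst.CondBr` payload for `condbr` into `extra`: the condition and
+/// both body lengths, followed by the then and else bodies with their optional break
+/// instructions appended last.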
+fn setCondBrPayload( + condbr: Zir.Inst.Index, + cond: Zir.Inst.Ref, + then_scope: *GenZir, + then_break: Zir.Inst.Index, + else_scope: *GenZir, + else_break: Zir.Inst.Index, +) !void { + defer then_scope.unstack(); + defer else_scope.unstack(); + const astgen = then_scope.astgen; + const then_body = then_scope.instructionsSliceUpto(else_scope); + const else_body = else_scope.instructionsSlice(); + const then_body_len = astgen.countBodyLenAfterFixups(then_body) + @boolToInt(then_break != 0); + const else_body_len = astgen.countBodyLenAfterFixups(else_body) + @boolToInt(else_break != 0); + try astgen.extra.ensureUnusedCapacity( + astgen.gpa, + @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_body_len + else_body_len, + ); + + const zir_datas = astgen.instructions.items(.data); + zir_datas[condbr].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.CondBr{ + .condition = cond, + .then_body_len = then_body_len, + .else_body_len = else_body_len, + }); + astgen.appendBodyWithFixups(then_body); + if (then_break != 0) astgen.extra.appendAssumeCapacity(then_break); + astgen.appendBodyWithFixups(else_body); + if (else_break != 0) astgen.extra.appendAssumeCapacity(else_break); +} + +/// Supports `else_scope` stacked on `then_scope`. Unstacks `else_scope` then `then_scope`. +fn setCondBrPayloadElideBlockStorePtr( + condbr: Zir.Inst.Index, + cond: Zir.Inst.Ref, + then_scope: *GenZir, + then_break: Zir.Inst.Index, + else_scope: *GenZir, + else_break: Zir.Inst.Index, + block_ptr: Zir.Inst.Ref, +) !void { + defer then_scope.unstack(); + defer else_scope.unstack(); + const astgen = then_scope.astgen; + const then_body = then_scope.instructionsSliceUpto(else_scope); + const else_body = else_scope.instructionsSlice(); + const has_then_break = then_break != 0; + const has_else_break = else_break != 0; + const then_body_len = astgen.countBodyLenAfterFixups(then_body) + @boolToInt(has_then_break); + const else_body_len = astgen.countBodyLenAfterFixups(else_body) + @boolToInt(has_else_break); + try astgen.extra.ensureUnusedCapacity( + astgen.gpa, + @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_body_len + else_body_len, + ); + + const zir_tags = astgen.instructions.items(.tag); + const zir_datas = astgen.instructions.items(.data); + + const condbr_pl = astgen.addExtraAssumeCapacity(Zir.Inst.CondBr{ + .condition = cond, + .then_body_len = then_body_len, + .else_body_len = else_body_len, + }); + zir_datas[condbr].pl_node.payload_index = condbr_pl; + const then_body_len_index = condbr_pl + 1; + const else_body_len_index = condbr_pl + 2; + + // The break instructions need to have their operands coerced if the + // switch's result location is a `ty`. In this case we overwrite the + // `store_to_block_ptr` instruction with an `as` instruction and repurpose + // it as the break operand. + // This corresponds to similar code in `labeledBlockExpr`. 
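+    // Roughly: each `store_to_block_ptr` targeting `block_ptr` is either rewritten into
+    // an `as` coercion feeding the break operand (when the branch has a result type and
+    // a break), or dropped, in which case the body length recorded above is decremented
+    // to compensate.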
+ for (then_body) |src_inst| { + if (zir_tags[src_inst] == .store_to_block_ptr and + zir_datas[src_inst].bin.lhs == block_ptr) + { + if (then_scope.rl_ty_inst != .none and has_then_break) { + zir_tags[src_inst] = .as; + zir_datas[src_inst].bin = .{ + .lhs = then_scope.rl_ty_inst, + .rhs = zir_datas[then_break].@"break".operand, + }; + zir_datas[then_break].@"break".operand = indexToRef(src_inst); + } else { + astgen.extra.items[then_body_len_index] -= 1; + continue; + } + } + appendPossiblyRefdBodyInst(astgen, &astgen.extra, src_inst); + } + if (has_then_break) astgen.extra.appendAssumeCapacity(then_break); + + for (else_body) |src_inst| { + if (zir_tags[src_inst] == .store_to_block_ptr and + zir_datas[src_inst].bin.lhs == block_ptr) + { + if (else_scope.rl_ty_inst != .none and has_else_break) { + zir_tags[src_inst] = .as; + zir_datas[src_inst].bin = .{ + .lhs = else_scope.rl_ty_inst, + .rhs = zir_datas[else_break].@"break".operand, + }; + zir_datas[else_break].@"break".operand = indexToRef(src_inst); + } else { + astgen.extra.items[else_body_len_index] -= 1; + continue; + } + } + appendPossiblyRefdBodyInst(astgen, &astgen.extra, src_inst); + } + if (has_else_break) astgen.extra.appendAssumeCapacity(else_break); +} + +fn whileExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + while_full: Ast.full.While, + is_statement: bool, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + + if (while_full.label_token) |label_token| { + try astgen.checkLabelRedefinition(scope, label_token); + } + + const is_inline = parent_gz.force_comptime or while_full.inline_token != null; + const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop; + const loop_block = try parent_gz.makeBlockInst(loop_tag, node); + try parent_gz.instructions.append(astgen.gpa, loop_block); + + var loop_scope = parent_gz.makeSubBlock(scope); + loop_scope.is_inline = is_inline; + loop_scope.setBreakResultInfo(ri); + defer loop_scope.unstack(); + defer loop_scope.labeled_breaks.deinit(astgen.gpa); + + var cond_scope = parent_gz.makeSubBlock(&loop_scope.base); + defer cond_scope.unstack(); + + const payload_is_ref = if (while_full.payload_token) |payload_token| + token_tags[payload_token] == .asterisk + else + false; + + try emitDbgNode(parent_gz, while_full.ast.cond_expr); + const cond: struct { + inst: Zir.Inst.Ref, + bool_bit: Zir.Inst.Ref, + } = c: { + if (while_full.error_token) |_| { + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const err_union = try expr(&cond_scope, &cond_scope.base, cond_ri, while_full.ast.cond_expr); + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; + break :c .{ + .inst = err_union, + .bool_bit = try cond_scope.addUnNode(tag, err_union, while_full.ast.cond_expr), + }; + } else if (while_full.payload_token) |_| { + const cond_ri: ResultInfo = .{ .rl = if (payload_is_ref) .ref else .none }; + const optional = try expr(&cond_scope, &cond_scope.base, cond_ri, while_full.ast.cond_expr); + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; + break :c .{ + .inst = optional, + .bool_bit = try cond_scope.addUnNode(tag, optional, while_full.ast.cond_expr), + }; + } else { + const cond = try expr(&cond_scope, &cond_scope.base, bool_ri, while_full.ast.cond_expr); + break :c .{ + .inst = cond, + .bool_bit = cond, + }; + } + }; + + const condbr_tag: Zir.Inst.Tag = if (is_inline) 
.condbr_inline else .condbr; + const condbr = try cond_scope.addCondBr(condbr_tag, node); + const block_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .block; + const cond_block = try loop_scope.makeBlockInst(block_tag, node); + try cond_scope.setBlockBody(cond_block); + // cond_scope unstacked now, can add new instructions to loop_scope + try loop_scope.instructions.append(astgen.gpa, cond_block); + + // make scope now but don't stack on parent_gz until loop_scope + // gets unstacked after cont_expr is emitted and added below + var then_scope = parent_gz.makeSubBlock(&cond_scope.base); + then_scope.instructions_top = GenZir.unstacked_top; + defer then_scope.unstack(); + + var dbg_var_name: ?u32 = null; + var dbg_var_inst: Zir.Inst.Ref = undefined; + var payload_inst: Zir.Inst.Index = 0; + var payload_val_scope: Scope.LocalVal = undefined; + const then_sub_scope = s: { + if (while_full.error_token != null) { + if (while_full.payload_token) |payload_token| { + const tag: Zir.Inst.Tag = if (payload_is_ref) + .err_union_payload_unsafe_ptr + else + .err_union_payload_unsafe; + // will add this instruction to then_scope.instructions below + payload_inst = try then_scope.makeUnNode(tag, cond.inst, while_full.ast.cond_expr); + const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; + const ident_bytes = tree.tokenSlice(ident_token); + if (mem.eql(u8, "_", ident_bytes)) + break :s &then_scope.base; + const payload_name_loc = payload_token + @boolToInt(payload_is_ref); + const ident_name = try astgen.identAsString(payload_name_loc); + try astgen.detectLocalShadowing(&then_scope.base, ident_name, payload_name_loc, ident_bytes, .capture); + payload_val_scope = .{ + .parent = &then_scope.base, + .gen_zir = &then_scope, + .name = ident_name, + .inst = indexToRef(payload_inst), + .token_src = payload_token, + .id_cat = .capture, + }; + dbg_var_name = ident_name; + dbg_var_inst = indexToRef(payload_inst); + break :s &payload_val_scope.base; + } else { + _ = try then_scope.addUnNode(.ensure_err_union_payload_void, cond.inst, node); + break :s &then_scope.base; + } + } else if (while_full.payload_token) |payload_token| { + const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; + const tag: Zir.Inst.Tag = if (payload_is_ref) + .optional_payload_unsafe_ptr + else + .optional_payload_unsafe; + // will add this instruction to then_scope.instructions below + payload_inst = try then_scope.makeUnNode(tag, cond.inst, while_full.ast.cond_expr); + const ident_name = try astgen.identAsString(ident_token); + const ident_bytes = tree.tokenSlice(ident_token); + if (mem.eql(u8, "_", ident_bytes)) + break :s &then_scope.base; + try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); + payload_val_scope = .{ + .parent = &then_scope.base, + .gen_zir = &then_scope, + .name = ident_name, + .inst = indexToRef(payload_inst), + .token_src = ident_token, + .id_cat = .capture, + }; + dbg_var_name = ident_name; + dbg_var_inst = indexToRef(payload_inst); + break :s &payload_val_scope.base; + } else { + break :s &then_scope.base; + } + }; + + var continue_scope = parent_gz.makeSubBlock(then_sub_scope); + continue_scope.instructions_top = GenZir.unstacked_top; + defer continue_scope.unstack(); + const continue_block = try then_scope.makeBlockInst(block_tag, node); + + const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat; + _ = try loop_scope.addNode(repeat_tag, node); + + try loop_scope.setBlockBody(loop_block); + 
loop_scope.break_block = loop_block; + loop_scope.continue_block = continue_block; + if (while_full.label_token) |label_token| { + loop_scope.label = @as(?GenZir.Label, GenZir.Label{ + .token = label_token, + .block_inst = loop_block, + }); + } + + // done adding instructions to loop_scope, can now stack then_scope + then_scope.instructions_top = then_scope.instructions.items.len; + + try then_scope.addDbgBlockBegin(); + const then_node = while_full.ast.then_expr; + if (payload_inst != 0) try then_scope.instructions.append(astgen.gpa, payload_inst); + if (dbg_var_name) |name| try then_scope.addDbgVar(.dbg_var_val, name, dbg_var_inst); + try then_scope.instructions.append(astgen.gpa, continue_block); + // This code could be improved to avoid emitting the continue expr when there + // are no jumps to it. This happens when the last statement of a while body is noreturn + // and there are no `continue` statements. + // Tracking issue: https://github.com/ziglang/zig/issues/9185 + if (while_full.ast.cont_expr != 0) { + _ = try unusedResultExpr(&then_scope, then_sub_scope, while_full.ast.cont_expr); + } + try then_scope.addDbgBlockEnd(); + + continue_scope.instructions_top = continue_scope.instructions.items.len; + _ = try unusedResultExpr(&continue_scope, &continue_scope.base, then_node); + try checkUsed(parent_gz, &then_scope.base, then_sub_scope); + const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; + if (!continue_scope.endsWithNoReturn()) { + const break_inst = try continue_scope.makeBreak(break_tag, continue_block, .void_value); + try then_scope.instructions.append(astgen.gpa, break_inst); + } + try continue_scope.setBlockBody(continue_block); + + var else_scope = parent_gz.makeSubBlock(&cond_scope.base); + defer else_scope.unstack(); + + const else_node = while_full.ast.else_expr; + const else_info: struct { + src: Ast.Node.Index, + result: Zir.Inst.Ref, + } = if (else_node != 0) blk: { + try else_scope.addDbgBlockBegin(); + const sub_scope = s: { + if (while_full.error_token) |error_token| { + const tag: Zir.Inst.Tag = if (payload_is_ref) + .err_union_code_ptr + else + .err_union_code; + const else_payload_inst = try else_scope.addUnNode(tag, cond.inst, while_full.ast.cond_expr); + const ident_name = try astgen.identAsString(error_token); + const ident_bytes = tree.tokenSlice(error_token); + if (mem.eql(u8, ident_bytes, "_")) + break :s &else_scope.base; + try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, ident_bytes, .capture); + payload_val_scope = .{ + .parent = &else_scope.base, + .gen_zir = &else_scope, + .name = ident_name, + .inst = else_payload_inst, + .token_src = error_token, + .id_cat = .capture, + }; + try else_scope.addDbgVar(.dbg_var_val, ident_name, else_payload_inst); + break :s &payload_val_scope.base; + } else { + break :s &else_scope.base; + } + }; + // Remove the continue block and break block so that `continue` and `break` + // control flow apply to outer loops; not this one. 
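+        // For example, a plain `break` written in `while (cond) { ... } else break;`
+        // targets an enclosing loop rather than the loop being lowered here.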
+ loop_scope.continue_block = 0; + loop_scope.break_block = 0; + const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_info, else_node); + if (is_statement) { + _ = try addEnsureResult(&else_scope, else_result, else_node); + } + + if (!else_scope.endsWithNoReturn()) { + loop_scope.break_count += 1; + } + try checkUsed(parent_gz, &else_scope.base, sub_scope); + try else_scope.addDbgBlockEnd(); + break :blk .{ + .src = else_node, + .result = else_result, + }; + } else .{ + .src = then_node, + .result = .none, + }; + + if (loop_scope.label) |some| { + if (!some.used) { + try astgen.appendErrorTok(some.token, "unused while loop label", .{}); + } + } + const result = try finishThenElseBlock( + parent_gz, + ri, + node, + &loop_scope, + &then_scope, + &else_scope, + condbr, + cond.bool_bit, + .void_value, + then_node, + else_info.result, + else_info.src, + loop_block, + cond_block, + break_tag, + ); + if (is_statement) { + _ = try parent_gz.addUnNode(.ensure_result_used, result, node); + } + return result; +} + +fn forExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + for_full: Ast.full.For, + is_statement: bool, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + + if (for_full.label_token) |label_token| { + try astgen.checkLabelRedefinition(scope, label_token); + } + + const is_inline = parent_gz.force_comptime or for_full.inline_token != null; + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + const node_tags = tree.nodes.items(.tag); + const node_data = tree.nodes.items(.data); + const gpa = astgen.gpa; + + // TODO this can be deleted after zig 0.11.0 is released because it + // will be caught in the parser. + if (for_full.isOldSyntax(token_tags)) { + return astgen.failTokNotes( + for_full.payload_token + 2, + "extra capture in for loop", + .{}, + &[_]u32{ + try astgen.errNoteTok( + for_full.payload_token + 2, + "run 'zig fmt' to upgrade your code automatically", + .{}, + ), + }, + ); + } + + // For counters, this is the start value; for indexables, this is the base + // pointer that can be used with elem_ptr and similar instructions. + // Special value `none` means that this is a counter and its start value is + // zero, indicating that the main index counter can be used directly. + const indexables = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len); + defer gpa.free(indexables); + // elements of this array can be `none`, indicating no length check. + const lens = try gpa.alloc(Zir.Inst.Ref, for_full.ast.inputs.len); + defer gpa.free(lens); + + // We will use a single zero-based counter no matter how many indexables there are. + const index_ptr = blk: { + const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime_mut else .alloc; + const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node); + // initialize to zero + _ = try parent_gz.addBin(.store, index_ptr, .zero_usize); + break :blk index_ptr; + }; + + var any_len_checks = false; + + { + var capture_token = for_full.payload_token; + for (for_full.ast.inputs, 0..) |input, i_usize| { + const i = @intCast(u32, i_usize); + const capture_is_ref = token_tags[capture_token] == .asterisk; + const ident_tok = capture_token + @boolToInt(capture_is_ref); + const is_discard = mem.eql(u8, tree.tokenSlice(ident_tok), "_"); + + if (is_discard and capture_is_ref) { + return astgen.failTok(capture_token, "pointer modifier invalid on discard", .{}); + } + // Skip over the comma, and on to the next capture (or the ending pipe character). 
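+            // e.g. for `for (items, 0..) |*item, i|` the captures are `*item` and `i`;
+            // each capture is separated from the next by a comma, and the list ends at
+            // the closing `|`.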
+ capture_token = ident_tok + 2; + + try emitDbgNode(parent_gz, input); + if (node_tags[input] == .for_range) { + if (capture_is_ref) { + return astgen.failTok(ident_tok, "cannot capture reference to range", .{}); + } + const start_node = node_data[input].lhs; + const start_val = try expr(parent_gz, scope, .{ .rl = .none }, start_node); + + const end_node = node_data[input].rhs; + const end_val = if (end_node != 0) + try expr(parent_gz, scope, .{ .rl = .none }, node_data[input].rhs) + else + .none; + + if (end_val == .none and is_discard) { + return astgen.failTok(ident_tok, "discard of unbounded counter", .{}); + } + + const start_is_zero = nodeIsTriviallyZero(tree, start_node); + const range_len = if (end_val == .none or start_is_zero) + end_val + else + try parent_gz.addPlNode(.sub, input, Zir.Inst.Bin{ + .lhs = end_val, + .rhs = start_val, + }); + + any_len_checks = any_len_checks or range_len != .none; + indexables[i] = if (start_is_zero) .none else start_val; + lens[i] = range_len; + } else { + const indexable = try expr(parent_gz, scope, .{ .rl = .none }, input); + + any_len_checks = true; + indexables[i] = indexable; + lens[i] = indexable; + } + } + } + + if (!any_len_checks) { + return astgen.failNode(node, "unbounded for loop", .{}); + } + + // We use a dedicated ZIR instruction to assert the lengths to assist with + // nicer error reporting as well as fewer ZIR bytes emitted. + const len: Zir.Inst.Ref = len: { + const lens_len = @intCast(u32, lens.len); + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len); + const len = try parent_gz.addPlNode(.for_len, node, Zir.Inst.MultiOp{ + .operands_len = lens_len, + }); + appendRefsAssumeCapacity(astgen, lens); + break :len len; + }; + + const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop; + const loop_block = try parent_gz.makeBlockInst(loop_tag, node); + try parent_gz.instructions.append(gpa, loop_block); + + var loop_scope = parent_gz.makeSubBlock(scope); + loop_scope.is_inline = is_inline; + loop_scope.setBreakResultInfo(ri); + defer loop_scope.unstack(); + defer loop_scope.labeled_breaks.deinit(gpa); + + const index = try loop_scope.addUnNode(.load, index_ptr, node); + + var cond_scope = parent_gz.makeSubBlock(&loop_scope.base); + defer cond_scope.unstack(); + + // Check the condition. + const cond = try cond_scope.addPlNode(.cmp_lt, node, Zir.Inst.Bin{ + .lhs = index, + .rhs = len, + }); + + const condbr_tag: Zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr; + const condbr = try cond_scope.addCondBr(condbr_tag, node); + const block_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .block; + const cond_block = try loop_scope.makeBlockInst(block_tag, node); + try cond_scope.setBlockBody(cond_block); + // cond_block unstacked now, can add new instructions to loop_scope + try loop_scope.instructions.append(gpa, cond_block); + + // Increment the index variable. 
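+    // The incremented value is stored back through `index_ptr` before `repeat`, so the
+    // next pass through the loop reloads the updated counter when re-checking the condition.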
+ const index_plus_one = try loop_scope.addPlNode(.add_unsafe, node, Zir.Inst.Bin{ + .lhs = index, + .rhs = .one_usize, + }); + _ = try loop_scope.addBin(.store, index_ptr, index_plus_one); + const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat; + _ = try loop_scope.addNode(repeat_tag, node); + + try loop_scope.setBlockBody(loop_block); + loop_scope.break_block = loop_block; + loop_scope.continue_block = cond_block; + if (for_full.label_token) |label_token| { + loop_scope.label = @as(?GenZir.Label, GenZir.Label{ + .token = label_token, + .block_inst = loop_block, + }); + } + + var then_node = for_full.ast.then_expr; + var then_scope = parent_gz.makeSubBlock(&cond_scope.base); + defer then_scope.unstack(); + + try then_scope.addDbgBlockBegin(); + + const capture_scopes = try gpa.alloc(Scope.LocalVal, for_full.ast.inputs.len); + defer gpa.free(capture_scopes); + + const then_sub_scope = blk: { + var capture_token = for_full.payload_token; + var capture_sub_scope: *Scope = &then_scope.base; + for (for_full.ast.inputs, 0..) |input, i_usize| { + const i = @intCast(u32, i_usize); + const capture_is_ref = token_tags[capture_token] == .asterisk; + const ident_tok = capture_token + @boolToInt(capture_is_ref); + const capture_name = tree.tokenSlice(ident_tok); + // Skip over the comma, and on to the next capture (or the ending pipe character). + capture_token = ident_tok + 2; + + if (mem.eql(u8, capture_name, "_")) continue; + + const name_str_index = try astgen.identAsString(ident_tok); + try astgen.detectLocalShadowing(capture_sub_scope, name_str_index, ident_tok, capture_name, .capture); + + const capture_inst = inst: { + const is_counter = node_tags[input] == .for_range; + + if (indexables[i] == .none) { + // Special case: the main index can be used directly. + assert(is_counter); + assert(!capture_is_ref); + break :inst index; + } + + // For counters, we add the index variable to the start value; for + // indexables, we use it as an element index. This is so similar + // that they can share the same code paths, branching only on the + // ZIR tag. + const switch_cond = (@as(u2, @boolToInt(capture_is_ref)) << 1) | @boolToInt(is_counter); + const tag: Zir.Inst.Tag = switch (switch_cond) { + 0b00 => .elem_val, + 0b01 => .add, + 0b10 => .elem_ptr, + 0b11 => unreachable, // compile error emitted already + }; + break :inst try then_scope.addPlNode(tag, input, Zir.Inst.Bin{ + .lhs = indexables[i], + .rhs = index, + }); + }; + + capture_scopes[i] = .{ + .parent = capture_sub_scope, + .gen_zir = &then_scope, + .name = name_str_index, + .inst = capture_inst, + .token_src = ident_tok, + .id_cat = .capture, + }; + + try then_scope.addDbgVar(.dbg_var_val, name_str_index, capture_inst); + capture_sub_scope = &capture_scopes[i].base; + } + + break :blk capture_sub_scope; + }; + + const then_result = try expr(&then_scope, then_sub_scope, .{ .rl = .none }, then_node); + _ = try addEnsureResult(&then_scope, then_result, then_node); + + try checkUsed(parent_gz, &then_scope.base, then_sub_scope); + try then_scope.addDbgBlockEnd(); + + var else_scope = parent_gz.makeSubBlock(&cond_scope.base); + defer else_scope.unstack(); + + const else_node = for_full.ast.else_expr; + const else_info: struct { + src: Ast.Node.Index, + result: Zir.Inst.Ref, + } = if (else_node != 0) blk: { + const sub_scope = &else_scope.base; + // Remove the continue block and break block so that `continue` and `break` + // control flow apply to outer loops; not this one. 
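+        // The else expression is still lowered with `loop_scope.break_result_info`, so
+        // its value can become the result of the whole loop expression.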
+ loop_scope.continue_block = 0; + loop_scope.break_block = 0; + const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_info, else_node); + if (is_statement) { + _ = try addEnsureResult(&else_scope, else_result, else_node); + } + + if (!else_scope.endsWithNoReturn()) { + loop_scope.break_count += 1; + } + break :blk .{ + .src = else_node, + .result = else_result, + }; + } else .{ + .src = then_node, + .result = .none, + }; + + if (loop_scope.label) |some| { + if (!some.used) { + try astgen.appendErrorTok(some.token, "unused for loop label", .{}); + } + } + const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; + const result = try finishThenElseBlock( + parent_gz, + ri, + node, + &loop_scope, + &then_scope, + &else_scope, + condbr, + cond, + then_result, + then_node, + else_info.result, + else_info.src, + loop_block, + cond_block, + break_tag, + ); + if (ri.rl.strategy(&loop_scope).tag == .break_void and loop_scope.break_count == 0) { + _ = try rvalue(parent_gz, ri, .void_value, node); + } + if (is_statement) { + _ = try parent_gz.addUnNode(.ensure_result_used, result, node); + } + return result; +} + +fn switchExpr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + switch_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = parent_gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + const operand_node = node_datas[switch_node].lhs; + const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange); + const case_nodes = tree.extra_data[extra.start..extra.end]; + + // We perform two passes over the AST. This first pass is to collect information + // for the following variables, make note of the special prong AST node index, + // and bail out with a compile error if there are multiple special prongs present. + var any_payload_is_ref = false; + var scalar_cases_len: u32 = 0; + var multi_cases_len: u32 = 0; + var inline_cases_len: u32 = 0; + var special_prong: Zir.SpecialProng = .none; + var special_node: Ast.Node.Index = 0; + var else_src: ?Ast.TokenIndex = null; + var underscore_src: ?Ast.TokenIndex = null; + for (case_nodes) |case_node| { + const case = tree.fullSwitchCase(case_node).?; + if (case.payload_token) |payload_token| { + if (token_tags[payload_token] == .asterisk) { + any_payload_is_ref = true; + } + } + // Check for else/`_` prong. 
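+        // An `else` prong has no values, while a `_` prong is written as a single `_`
+        // item; at most one of each may appear, and the two are mutually exclusive.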
+ if (case.ast.values.len == 0) { + const case_src = case.ast.arrow_token - 1; + if (else_src) |src| { + return astgen.failTokNotes( + case_src, + "multiple else prongs in switch expression", + .{}, + &[_]u32{ + try astgen.errNoteTok( + src, + "previous else prong here", + .{}, + ), + }, + ); + } else if (underscore_src) |some_underscore| { + return astgen.failNodeNotes( + switch_node, + "else and '_' prong in switch expression", + .{}, + &[_]u32{ + try astgen.errNoteTok( + case_src, + "else prong here", + .{}, + ), + try astgen.errNoteTok( + some_underscore, + "'_' prong here", + .{}, + ), + }, + ); + } + special_node = case_node; + special_prong = .@"else"; + else_src = case_src; + continue; + } else if (case.ast.values.len == 1 and + node_tags[case.ast.values[0]] == .identifier and + mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_")) + { + const case_src = case.ast.arrow_token - 1; + if (underscore_src) |src| { + return astgen.failTokNotes( + case_src, + "multiple '_' prongs in switch expression", + .{}, + &[_]u32{ + try astgen.errNoteTok( + src, + "previous '_' prong here", + .{}, + ), + }, + ); + } else if (else_src) |some_else| { + return astgen.failNodeNotes( + switch_node, + "else and '_' prong in switch expression", + .{}, + &[_]u32{ + try astgen.errNoteTok( + some_else, + "else prong here", + .{}, + ), + try astgen.errNoteTok( + case_src, + "'_' prong here", + .{}, + ), + }, + ); + } + if (case.inline_token != null) { + return astgen.failTok(case_src, "cannot inline '_' prong", .{}); + } + special_node = case_node; + special_prong = .under; + underscore_src = case_src; + continue; + } + + for (case.ast.values) |val| { + if (node_tags[val] == .string_literal) + return astgen.failNode(val, "cannot switch on strings", .{}); + } + + if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] != .switch_range) { + scalar_cases_len += 1; + } else { + multi_cases_len += 1; + } + if (case.inline_token != null) { + inline_cases_len += 1; + } + } + + const operand_ri: ResultInfo = .{ .rl = if (any_payload_is_ref) .ref else .none }; + astgen.advanceSourceCursorToNode(operand_node); + const operand_line = astgen.source_line - parent_gz.decl_line; + const operand_column = astgen.source_column; + const raw_operand = try expr(parent_gz, scope, operand_ri, operand_node); + const cond_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_cond_ref else .switch_cond; + const cond = try parent_gz.addUnNode(cond_tag, raw_operand, operand_node); + // Sema expects a dbg_stmt immediately after switch_cond(_ref) + try emitDbgStmt(parent_gz, operand_line, operand_column); + // We need the type of the operand to use as the result location for all the prong items. 
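+    // e.g. in `switch (e) { .foo, .bar => {}, else => {} }` the items `.foo` and `.bar`
+    // are evaluated with the operand's type as their result type, so enum literals
+    // resolve against the operand's enum.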
+ const cond_ty_inst = try parent_gz.addUnNode(.typeof, cond, operand_node); + const item_ri: ResultInfo = .{ .rl = .{ .ty = cond_ty_inst } }; + + // This contains the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti, + // except the first cases_nodes.len slots are a table that indexes payloads later in the array, with + // the special case index coming first, then scalar_case_len indexes, then multi_cases_len indexes + const payloads = &astgen.scratch; + const scratch_top = astgen.scratch.items.len; + const case_table_start = scratch_top; + const scalar_case_table = case_table_start + @boolToInt(special_prong != .none); + const multi_case_table = scalar_case_table + scalar_cases_len; + const case_table_end = multi_case_table + multi_cases_len; + try astgen.scratch.resize(gpa, case_table_end); + defer astgen.scratch.items.len = scratch_top; + + var block_scope = parent_gz.makeSubBlock(scope); + // block_scope not used for collecting instructions + block_scope.instructions_top = GenZir.unstacked_top; + block_scope.setBreakResultInfo(ri); + + // This gets added to the parent block later, after the item expressions. + const switch_block = try parent_gz.makeBlockInst(.switch_block, switch_node); + + // We re-use this same scope for all cases, including the special prong, if any. + var case_scope = parent_gz.makeSubBlock(&block_scope.base); + case_scope.instructions_top = GenZir.unstacked_top; + + // In this pass we generate all the item and prong expressions. + var multi_case_index: u32 = 0; + var scalar_case_index: u32 = 0; + for (case_nodes) |case_node| { + const case = tree.fullSwitchCase(case_node).?; + + const is_multi_case = case.ast.values.len > 1 or + (case.ast.values.len == 1 and node_tags[case.ast.values[0]] == .switch_range); + + var dbg_var_name: ?u32 = null; + var dbg_var_inst: Zir.Inst.Ref = undefined; + var dbg_var_tag_name: ?u32 = null; + var dbg_var_tag_inst: Zir.Inst.Ref = undefined; + var capture_inst: Zir.Inst.Index = 0; + var tag_inst: Zir.Inst.Index = 0; + var capture_val_scope: Scope.LocalVal = undefined; + var tag_scope: Scope.LocalVal = undefined; + const sub_scope = blk: { + const payload_token = case.payload_token orelse break :blk &case_scope.base; + const ident = if (token_tags[payload_token] == .asterisk) + payload_token + 1 + else + payload_token; + const is_ptr = ident != payload_token; + const ident_slice = tree.tokenSlice(ident); + var payload_sub_scope: *Scope = undefined; + if (mem.eql(u8, ident_slice, "_")) { + if (is_ptr) { + return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{}); + } + payload_sub_scope = &case_scope.base; + } else { + if (case_node == special_node) { + const capture_tag: Zir.Inst.Tag = if (is_ptr) + .switch_capture_ref + else + .switch_capture; + capture_inst = @intCast(Zir.Inst.Index, astgen.instructions.len); + try astgen.instructions.append(gpa, .{ + .tag = capture_tag, + .data = .{ + .switch_capture = .{ + .switch_inst = switch_block, + // Max int communicates that this is the else/underscore prong. 
+ .prong_index = std.math.maxInt(u32), + }, + }, + }); + } else { + const is_multi_case_bits: u2 = @boolToInt(is_multi_case); + const is_ptr_bits: u2 = @boolToInt(is_ptr); + const capture_tag: Zir.Inst.Tag = switch ((is_multi_case_bits << 1) | is_ptr_bits) { + 0b00 => .switch_capture, + 0b01 => .switch_capture_ref, + 0b10 => .switch_capture_multi, + 0b11 => .switch_capture_multi_ref, + }; + const capture_index = if (is_multi_case) multi_case_index else scalar_case_index; + capture_inst = @intCast(Zir.Inst.Index, astgen.instructions.len); + try astgen.instructions.append(gpa, .{ + .tag = capture_tag, + .data = .{ .switch_capture = .{ + .switch_inst = switch_block, + .prong_index = capture_index, + } }, + }); + } + const capture_name = try astgen.identAsString(ident); + try astgen.detectLocalShadowing(&case_scope.base, capture_name, ident, ident_slice, .capture); + capture_val_scope = .{ + .parent = &case_scope.base, + .gen_zir = &case_scope, + .name = capture_name, + .inst = indexToRef(capture_inst), + .token_src = payload_token, + .id_cat = .capture, + }; + dbg_var_name = capture_name; + dbg_var_inst = indexToRef(capture_inst); + payload_sub_scope = &capture_val_scope.base; + } + + const tag_token = if (token_tags[ident + 1] == .comma) + ident + 2 + else + break :blk payload_sub_scope; + const tag_slice = tree.tokenSlice(tag_token); + if (mem.eql(u8, tag_slice, "_")) { + return astgen.failTok(tag_token, "discard of tag capture; omit it instead", .{}); + } else if (case.inline_token == null) { + return astgen.failTok(tag_token, "tag capture on non-inline prong", .{}); + } + const tag_name = try astgen.identAsString(tag_token); + try astgen.detectLocalShadowing(payload_sub_scope, tag_name, tag_token, tag_slice, .@"switch tag capture"); + tag_inst = @intCast(Zir.Inst.Index, astgen.instructions.len); + try astgen.instructions.append(gpa, .{ + .tag = .switch_capture_tag, + .data = .{ .un_tok = .{ + .operand = cond, + .src_tok = case_scope.tokenIndexToRelative(tag_token), + } }, + }); + + tag_scope = .{ + .parent = payload_sub_scope, + .gen_zir = &case_scope, + .name = tag_name, + .inst = indexToRef(tag_inst), + .token_src = tag_token, + .id_cat = .@"switch tag capture", + }; + dbg_var_tag_name = tag_name; + dbg_var_tag_inst = indexToRef(tag_inst); + break :blk &tag_scope.base; + }; + + const header_index = @intCast(u32, payloads.items.len); + const body_len_index = if (is_multi_case) blk: { + payloads.items[multi_case_table + multi_case_index] = header_index; + multi_case_index += 1; + try payloads.resize(gpa, header_index + 3); // items_len, ranges_len, body_len + + // items + var items_len: u32 = 0; + for (case.ast.values) |item_node| { + if (node_tags[item_node] == .switch_range) continue; + items_len += 1; + + const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); + try payloads.append(gpa, @enumToInt(item_inst)); + } + + // ranges + var ranges_len: u32 = 0; + for (case.ast.values) |range| { + if (node_tags[range] != .switch_range) continue; + ranges_len += 1; + + const first = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].lhs); + const last = try comptimeExpr(parent_gz, scope, item_ri, node_datas[range].rhs); + try payloads.appendSlice(gpa, &[_]u32{ + @enumToInt(first), @enumToInt(last), + }); + } + + payloads.items[header_index] = items_len; + payloads.items[header_index + 1] = ranges_len; + break :blk header_index + 2; + } else if (case_node == special_node) blk: { + payloads.items[case_table_start] = header_index; + try payloads.resize(gpa, 
header_index + 1); // body_len + break :blk header_index; + } else blk: { + payloads.items[scalar_case_table + scalar_case_index] = header_index; + scalar_case_index += 1; + try payloads.resize(gpa, header_index + 2); // item, body_len + const item_node = case.ast.values[0]; + const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node); + payloads.items[header_index] = @enumToInt(item_inst); + break :blk header_index + 1; + }; + + { + // temporarily stack case_scope on parent_gz + case_scope.instructions_top = parent_gz.instructions.items.len; + defer case_scope.unstack(); + + if (capture_inst != 0) try case_scope.instructions.append(gpa, capture_inst); + if (tag_inst != 0) try case_scope.instructions.append(gpa, tag_inst); + try case_scope.addDbgBlockBegin(); + if (dbg_var_name) |some| { + try case_scope.addDbgVar(.dbg_var_val, some, dbg_var_inst); + } + if (dbg_var_tag_name) |some| { + try case_scope.addDbgVar(.dbg_var_val, some, dbg_var_tag_inst); + } + const target_expr_node = case.ast.target_expr; + const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_info, target_expr_node); + try checkUsed(parent_gz, &case_scope.base, sub_scope); + try case_scope.addDbgBlockEnd(); + if (!parent_gz.refIsNoReturn(case_result)) { + block_scope.break_count += 1; + _ = try case_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node); + } + + const case_slice = case_scope.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(case_slice); + try payloads.ensureUnusedCapacity(gpa, body_len); + const inline_bit = @as(u32, @boolToInt(case.inline_token != null)) << 31; + payloads.items[body_len_index] = body_len | inline_bit; + appendBodyWithFixupsArrayList(astgen, payloads, case_slice); + } + } + // Now that the item expressions are generated we can add this. + try parent_gz.instructions.append(gpa, switch_block); + + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlock).Struct.fields.len + + @boolToInt(multi_cases_len != 0) + + payloads.items.len - case_table_end); + + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlock{ + .operand = cond, + .bits = Zir.Inst.SwitchBlock.Bits{ + .has_multi_cases = multi_cases_len != 0, + .has_else = special_prong == .@"else", + .has_under = special_prong == .under, + .scalar_cases_len = @intCast(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, scalar_cases_len), + }, + }); + + if (multi_cases_len != 0) { + astgen.extra.appendAssumeCapacity(multi_cases_len); + } + + const zir_datas = astgen.instructions.items(.data); + const zir_tags = astgen.instructions.items(.tag); + + zir_datas[switch_block].pl_node.payload_index = payload_index; + + const strat = ri.rl.strategy(&block_scope); + for (payloads.items[case_table_start..case_table_end], 0..) 
|start_index, i| { + var body_len_index = start_index; + var end_index = start_index; + const table_index = case_table_start + i; + if (table_index < scalar_case_table) { + end_index += 1; + } else if (table_index < multi_case_table) { + body_len_index += 1; + end_index += 2; + } else { + body_len_index += 2; + const items_len = payloads.items[start_index]; + const ranges_len = payloads.items[start_index + 1]; + end_index += 3 + items_len + 2 * ranges_len; + } + + const body_len = @truncate(u31, payloads.items[body_len_index]); + end_index += body_len; + + switch (strat.tag) { + .break_operand => blk: { + // Switch expressions return `true` for `nodeMayNeedMemoryLocation` thus + // `elide_store_to_block_ptr_instructions` will either be true, + // or all prongs are noreturn. + if (!strat.elide_store_to_block_ptr_instructions) + break :blk; + + // There will necessarily be a store_to_block_ptr for + // all prongs, except for prongs that ended with a noreturn instruction. + // Elide all the `store_to_block_ptr` instructions. + + // The break instructions need to have their operands coerced if the + // switch's result location is a `ty`. In this case we overwrite the + // `store_to_block_ptr` instruction with an `as` instruction and repurpose + // it as the break operand. + if (body_len < 2) + break :blk; + + var store_index = end_index - 2; + while (true) : (store_index -= 1) switch (zir_tags[payloads.items[store_index]]) { + .dbg_block_end, .dbg_block_begin, .dbg_stmt, .dbg_var_val, .dbg_var_ptr => {}, + else => break, + }; + const store_inst = payloads.items[store_index]; + if (zir_tags[store_inst] != .store_to_block_ptr or + zir_datas[store_inst].bin.lhs != block_scope.rl_ptr) + break :blk; + const break_inst = payloads.items[end_index - 1]; + if (block_scope.rl_ty_inst != .none) { + zir_tags[store_inst] = .as; + zir_datas[store_inst].bin = .{ + .lhs = block_scope.rl_ty_inst, + .rhs = zir_datas[break_inst].@"break".operand, + }; + zir_datas[break_inst].@"break".operand = indexToRef(store_inst); + } else { + payloads.items[body_len_index] -= 1; + astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index .. 
end_index - 2]); + astgen.extra.appendAssumeCapacity(break_inst); + continue; + } + }, + .break_void => { + assert(!strat.elide_store_to_block_ptr_instructions); + const last_inst = payloads.items[end_index - 1]; + if (zir_tags[last_inst] == .@"break") { + const inst_data = zir_datas[last_inst].@"break"; + const block_inst = astgen.extra.items[inst_data.payload_index]; + if (block_inst == switch_block) { + zir_datas[last_inst].@"break".operand = .void_value; + } + } + }, + } + + astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]); + } + + const block_ref = indexToRef(switch_block); + if (strat.tag == .break_operand and strat.elide_store_to_block_ptr_instructions and ri.rl != .ref) + return rvalue(parent_gz, ri, block_ref, switch_node); + return block_ref; +} + +fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + + if (astgen.fn_block == null) { + return astgen.failNode(node, "'return' outside function scope", .{}); + } + + if (gz.any_defer_node != 0) { + return astgen.failNodeNotes(node, "cannot return from defer expression", .{}, &.{ + try astgen.errNoteNode( + gz.any_defer_node, + "defer expression here", + .{}, + ), + }); + } + + // Ensure debug line/column information is emitted for this return expression. + // Then we will save the line/column so that we can emit another one that goes + // "backwards" because we want to evaluate the operand, but then put the debug + // info back at the return keyword for error return tracing. + if (!gz.force_comptime) { + try emitDbgNode(gz, node); + } + const ret_line = astgen.source_line - gz.decl_line; + const ret_column = astgen.source_column; + + const defer_outer = &astgen.fn_block.?.base; + + const operand_node = node_datas[node].lhs; + if (operand_node == 0) { + // Returning a void value; skip error defers. + try genDefers(gz, defer_outer, scope, .normal_only); + + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.ret, .always); + + _ = try gz.addUnNode(.ret_node, .void_value, node); + return Zir.Inst.Ref.unreachable_value; + } + + if (node_tags[operand_node] == .error_value) { + // Hot path for `return error.Foo`. This bypasses result location logic as well as logic + // for detecting whether to add something to the function's inferred error set. 
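+ // For example, `return error.OutOfMemory;` can lower directly to a `ret_err_value`
+ // instruction below when no errdefer needs access to the error code.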
+ const ident_token = node_datas[operand_node].rhs; + const err_name_str_index = try astgen.identAsString(ident_token); + const defer_counts = countDefers(defer_outer, scope); + if (!defer_counts.need_err_code) { + try genDefers(gz, defer_outer, scope, .both_sans_err); + try emitDbgStmt(gz, ret_line, ret_column); + _ = try gz.addStrTok(.ret_err_value, err_name_str_index, ident_token); + return Zir.Inst.Ref.unreachable_value; + } + const err_code = try gz.addStrTok(.ret_err_value_code, err_name_str_index, ident_token); + try genDefers(gz, defer_outer, scope, .{ .both = err_code }); + try emitDbgStmt(gz, ret_line, ret_column); + _ = try gz.addUnNode(.ret_node, err_code, node); + return Zir.Inst.Ref.unreachable_value; + } + + const ri: ResultInfo = if (nodeMayNeedMemoryLocation(tree, operand_node, true)) .{ + .rl = .{ .ptr = .{ .inst = try gz.addNode(.ret_ptr, node) } }, + .ctx = .@"return", + } else .{ + .rl = .{ .ty = try gz.addNode(.ret_type, node) }, + .ctx = .@"return", + }; + const prev_anon_name_strategy = gz.anon_name_strategy; + gz.anon_name_strategy = .func; + const operand = try reachableExpr(gz, scope, ri, operand_node, node); + gz.anon_name_strategy = prev_anon_name_strategy; + + switch (nodeMayEvalToError(tree, operand_node)) { + .never => { + // Returning a value that cannot be an error; skip error defers. + try genDefers(gz, defer_outer, scope, .normal_only); + + // As our last action before the return, "pop" the error trace if needed + _ = try gz.addRestoreErrRetIndex(.ret, .always); + + try emitDbgStmt(gz, ret_line, ret_column); + try gz.addRet(ri, operand, node); + return Zir.Inst.Ref.unreachable_value; + }, + .always => { + // Value is always an error. Emit both error defers and regular defers. + const err_code = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; + try genDefers(gz, defer_outer, scope, .{ .both = err_code }); + try emitDbgStmt(gz, ret_line, ret_column); + try gz.addRet(ri, operand, node); + return Zir.Inst.Ref.unreachable_value; + }, + .maybe => { + const defer_counts = countDefers(defer_outer, scope); + if (!defer_counts.have_err) { + // Only regular defers; no branch needed. + try genDefers(gz, defer_outer, scope, .normal_only); + try emitDbgStmt(gz, ret_line, ret_column); + + // As our last action before the return, "pop" the error trace if needed + const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; + _ = try gz.addRestoreErrRetIndex(.ret, .{ .if_non_error = result }); + + try gz.addRet(ri, operand, node); + return Zir.Inst.Ref.unreachable_value; + } + + // Emit conditional branch for generating errdefers. 
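+ // The branch below lowers roughly to: if the operand is not an error, run only the
+ // normal defers; otherwise also run the errdefers, passing them the error code.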
+ const result = if (ri.rl == .ptr) try gz.addUnNode(.load, ri.rl.ptr.inst, node) else operand; + const is_non_err = try gz.addUnNode(.ret_is_non_err, result, node); + const condbr = try gz.addCondBr(.condbr, node); + + var then_scope = gz.makeSubBlock(scope); + defer then_scope.unstack(); + + try genDefers(&then_scope, defer_outer, scope, .normal_only); + + // As our last action before the return, "pop" the error trace if needed + _ = try then_scope.addRestoreErrRetIndex(.ret, .always); + + try emitDbgStmt(&then_scope, ret_line, ret_column); + try then_scope.addRet(ri, operand, node); + + var else_scope = gz.makeSubBlock(scope); + defer else_scope.unstack(); + + const which_ones: DefersToEmit = if (!defer_counts.need_err_code) .both_sans_err else .{ + .both = try else_scope.addUnNode(.err_union_code, result, node), + }; + try genDefers(&else_scope, defer_outer, scope, which_ones); + try emitDbgStmt(&else_scope, ret_line, ret_column); + try else_scope.addRet(ri, operand, node); + + try setCondBrPayload(condbr, is_non_err, &then_scope, 0, &else_scope, 0); + + return Zir.Inst.Ref.unreachable_value; + }, + } +} + +/// Parses the string `buf` as a base 10 integer of type `u16`. +/// +/// Unlike std.fmt.parseInt, does not allow the '_' character in `buf`. +fn parseBitCount(buf: []const u8) std.fmt.ParseIntError!u16 { + if (buf.len == 0) return error.InvalidCharacter; + + var x: u16 = 0; + + for (buf) |c| { + const digit = switch (c) { + '0'...'9' => c - '0', + else => return error.InvalidCharacter, + }; + + if (x != 0) x = try std.math.mul(u16, x, 10); + x = try std.math.add(u16, x, @as(u16, digit)); + } + + return x; +} + +fn identifier( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + ident: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const tracy = trace(@src()); + defer tracy.end(); + + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + + const ident_token = main_tokens[ident]; + const ident_name_raw = tree.tokenSlice(ident_token); + if (mem.eql(u8, ident_name_raw, "_")) { + return astgen.failNode(ident, "'_' used as an identifier without @\"_\" syntax", .{}); + } + + // if not @"" syntax, just use raw token slice + if (ident_name_raw[0] != '@') { + if (primitive_instrs.get(ident_name_raw)) |zir_const_ref| { + return rvalue(gz, ri, zir_const_ref, ident); + } + + if (ident_name_raw.len >= 2) integer: { + const first_c = ident_name_raw[0]; + if (first_c == 'i' or first_c == 'u') { + const signedness: std.builtin.Signedness = switch (first_c == 'i') { + true => .signed, + false => .unsigned, + }; + if (ident_name_raw.len >= 3 and ident_name_raw[1] == '0') { + return astgen.failNode( + ident, + "primitive integer type '{s}' has leading zero", + .{ident_name_raw}, + ); + } + const bit_count = parseBitCount(ident_name_raw[1..]) catch |err| switch (err) { + error.Overflow => return astgen.failNode( + ident, + "primitive integer type '{s}' exceeds maximum bit width of 65535", + .{ident_name_raw}, + ), + error.InvalidCharacter => break :integer, + }; + const result = try gz.add(.{ + .tag = .int_type, + .data = .{ .int_type = .{ + .src_node = gz.nodeIndexToRelative(ident), + .signedness = signedness, + .bit_count = bit_count, + } }, + }); + return rvalue(gz, ri, result, ident); + } + } + } + + // Local variables, including function parameters. 
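+ // Anything that is not a primitive or an `iN`/`uN` integer type name is resolved
+ // through the scope chain below: locals first, then namespace declarations.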
+ return localVarRef(gz, scope, ri, ident, ident_token); +} + +fn localVarRef( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + ident: Ast.Node.Index, + ident_token: Ast.TokenIndex, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const name_str_index = try astgen.identAsString(ident_token); + var s = scope; + var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already + var num_namespaces_out: u32 = 0; + var capturing_namespace: ?*Scope.Namespace = null; + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + + if (local_val.name == name_str_index) { + // Locals cannot shadow anything, so we do not need to look for ambiguous + // references in this case. + if (ri.rl == .discard and ri.ctx == .assignment) { + local_val.discarded = ident_token; + } else { + local_val.used = ident_token; + } + + const value_inst = try tunnelThroughClosure( + gz, + ident, + num_namespaces_out, + capturing_namespace, + local_val.inst, + local_val.token_src, + gpa, + ); + + return rvalue(gz, ri, value_inst, ident); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == name_str_index) { + if (ri.rl == .discard and ri.ctx == .assignment) { + local_ptr.discarded = ident_token; + } else { + local_ptr.used = ident_token; + } + + // Can't close over a runtime variable + if (num_namespaces_out != 0 and !local_ptr.maybe_comptime) { + const ident_name = try astgen.identifierTokenString(ident_token); + return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{ + try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}), + try astgen.errNoteNode(capturing_namespace.?.node, "crosses namespace boundary here", .{}), + }); + } + + const ptr_inst = try tunnelThroughClosure( + gz, + ident, + num_namespaces_out, + capturing_namespace, + local_ptr.ptr, + local_ptr.token_src, + gpa, + ); + + switch (ri.rl) { + .ref => return ptr_inst, + else => { + const loaded = try gz.addUnNode(.load, ptr_inst, ident); + return rvalue(gz, ri, loaded, ident); + }, + } + } + s = local_ptr.parent; + }, + .gen_zir => s = s.cast(GenZir).?.parent, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => { + const ns = s.cast(Scope.Namespace).?; + if (ns.decls.get(name_str_index)) |i| { + if (found_already) |f| { + return astgen.failNodeNotes(ident, "ambiguous reference", .{}, &.{ + try astgen.errNoteNode(f, "declared here", .{}), + try astgen.errNoteNode(i, "also declared here", .{}), + }); + } + // We found a match but must continue looking for ambiguous references to decls. + found_already = i; + } + if (s.tag == .namespace) num_namespaces_out += 1; + capturing_namespace = ns; + s = ns.parent; + }, + .top => break, + }; + if (found_already == null) { + const ident_name = try astgen.identifierTokenString(ident_token); + return astgen.failNode(ident, "use of undeclared identifier '{s}'", .{ident_name}); + } + + // Decl references happen by name rather than ZIR index so that when unrelated + // decls are modified, ZIR code containing references to them can be unmodified. + switch (ri.rl) { + .ref => return gz.addStrTok(.decl_ref, name_str_index, ident_token), + else => { + const result = try gz.addStrTok(.decl_val, name_str_index, ident_token); + return rvalue(gz, ri, result, ident); + }, + } +} + +/// Adds a capture to a namespace, if needed. 
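+/// A capture is needed when code inside a nested namespace (struct, union, enum, opaque)
+/// references a comptime-known value declared in an enclosing scope.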
+/// Returns the index of the closure_capture instruction. +fn tunnelThroughClosure( + gz: *GenZir, + inner_ref_node: Ast.Node.Index, + num_tunnels: u32, + ns: ?*Scope.Namespace, + value: Zir.Inst.Ref, + token: Ast.TokenIndex, + gpa: Allocator, +) !Zir.Inst.Ref { + // For trivial values, we don't need a tunnel. + // Just return the ref. + if (num_tunnels == 0 or refToIndex(value) == null) { + return value; + } + + // Otherwise we need a tunnel. Check if this namespace + // already has one for this value. + const gop = try ns.?.captures.getOrPut(gpa, refToIndex(value).?); + if (!gop.found_existing) { + // Make a new capture for this value but don't add it to the declaring_gz yet + try gz.astgen.instructions.append(gz.astgen.gpa, .{ + .tag = .closure_capture, + .data = .{ .un_tok = .{ + .operand = value, + .src_tok = ns.?.declaring_gz.?.tokenIndexToRelative(token), + } }, + }); + gop.value_ptr.* = @intCast(Zir.Inst.Index, gz.astgen.instructions.len - 1); + } + + // Add an instruction to get the value from the closure into + // our current context + return try gz.addInstNode(.closure_get, gop.value_ptr.*, inner_ref_node); +} + +fn stringLiteral( + gz: *GenZir, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const str_lit_token = main_tokens[node]; + const str = try astgen.strLitAsString(str_lit_token); + const result = try gz.add(.{ + .tag = .str, + .data = .{ .str = .{ + .start = str.index, + .len = str.len, + } }, + }); + return rvalue(gz, ri, result, node); +} + +fn multilineStringLiteral( + gz: *GenZir, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const str = try astgen.strLitNodeAsString(node); + const result = try gz.add(.{ + .tag = .str, + .data = .{ .str = .{ + .start = str.index, + .len = str.len, + } }, + }); + return rvalue(gz, ri, result, node); +} + +fn charLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const main_token = main_tokens[node]; + const slice = tree.tokenSlice(main_token); + + switch (std.zig.parseCharLiteral(slice)) { + .success => |codepoint| { + const result = try gz.addInt(codepoint); + return rvalue(gz, ri, result, node); + }, + .failure => |err| return astgen.failWithStrLitError(err, main_token, slice, 0), + } +} + +const Sign = enum { negative, positive }; + +fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node: Ast.Node.Index, sign: Sign) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const num_token = main_tokens[node]; + const bytes = tree.tokenSlice(num_token); + + const result: Zir.Inst.Ref = switch (std.zig.parseNumberLiteral(bytes)) { + .int => |num| switch (num) { + 0 => .zero, + 1 => .one, + else => try gz.addInt(num), + }, + .big_int => |base| big: { + const gpa = astgen.gpa; + var big_int = try std.math.big.int.Managed.init(gpa); + defer big_int.deinit(); + const prefix_offset = @as(u8, 2) * @boolToInt(base != .decimal); + big_int.setString(@enumToInt(base), bytes[prefix_offset..]) catch |err| switch (err) { + error.InvalidCharacter => unreachable, // caught in `parseNumberLiteral` + error.InvalidBase => unreachable, // we only pass 16, 8, 2, see above + error.OutOfMemory => return error.OutOfMemory, + 
}; + + const limbs = big_int.limbs[0..big_int.len()]; + assert(big_int.isPositive()); + break :big try gz.addIntBig(limbs); + }, + .float => { + const unsigned_float_number = std.fmt.parseFloat(f128, bytes) catch |err| switch (err) { + error.InvalidCharacter => unreachable, // validated by tokenizer + }; + const float_number = switch (sign) { + .negative => -unsigned_float_number, + .positive => unsigned_float_number, + }; + // If the value fits into a f64 without losing any precision, store it that way. + @setFloatMode(.Strict); + const smaller_float = @floatCast(f64, float_number); + const bigger_again: f128 = smaller_float; + if (bigger_again == float_number) { + const result = try gz.addFloat(smaller_float); + return rvalue(gz, ri, result, source_node); + } + // We need to use 128 bits. Break the float into 4 u32 values so we can + // put it into the `extra` array. + const int_bits = @bitCast(u128, float_number); + const result = try gz.addPlNode(.float128, node, Zir.Inst.Float128{ + .piece0 = @truncate(u32, int_bits), + .piece1 = @truncate(u32, int_bits >> 32), + .piece2 = @truncate(u32, int_bits >> 64), + .piece3 = @truncate(u32, int_bits >> 96), + }); + return rvalue(gz, ri, result, source_node); + }, + .failure => |err| return astgen.failWithNumberError(err, num_token, bytes), + }; + + if (sign == .positive) { + return rvalue(gz, ri, result, source_node); + } else { + const negated = try gz.addUnNode(.negate, result, source_node); + return rvalue(gz, ri, negated, source_node); + } +} + +fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token: Ast.TokenIndex, bytes: []const u8) InnerError { + const is_float = std.mem.indexOfScalar(u8, bytes, '.') != null; + switch (err) { + .leading_zero => if (is_float) { + return astgen.failTok(token, "number '{s}' has leading zero", .{bytes}); + } else { + return astgen.failTokNotes(token, "number '{s}' has leading zero", .{bytes}, &.{ + try astgen.errNoteTok(token, "use '0o' prefix for octal literals", .{}), + }); + }, + .digit_after_base => return astgen.failTok(token, "expected a digit after base prefix", .{}), + .upper_case_base => |i| return astgen.failOff(token, @intCast(u32, i), "base prefix must be lowercase", .{}), + .invalid_float_base => |i| return astgen.failOff(token, @intCast(u32, i), "invalid base for float literal", .{}), + .repeated_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "repeated digit separator", .{}), + .invalid_underscore_after_special => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before digit separator", .{}), + .invalid_digit => |info| return astgen.failOff(token, @intCast(u32, info.i), "invalid digit '{c}' for {s} base", .{ bytes[info.i], @tagName(info.base) }), + .invalid_digit_exponent => |i| return astgen.failOff(token, @intCast(u32, i), "invalid digit '{c}' in exponent", .{bytes[i]}), + .duplicate_exponent => |i| return astgen.failOff(token, @intCast(u32, i), "duplicate exponent", .{}), + .invalid_hex_exponent => |i| return astgen.failOff(token, @intCast(u32, i), "hex exponent in decimal float", .{}), + .exponent_after_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before exponent", .{}), + .special_after_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before '{c}'", .{bytes[i]}), + .trailing_special => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit after '{c}'", .{bytes[i - 1]}), + .trailing_underscore => |i| return astgen.failOff(token, @intCast(u32, i), 
"trailing digit separator", .{}), + .duplicate_period => unreachable, // Validated by tokenizer + .invalid_character => unreachable, // Validated by tokenizer + .invalid_exponent_sign => unreachable, // Validated by tokenizer + } +} + +fn asmExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + full: Ast.full.Asm, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const token_tags = tree.tokens.items(.tag); + + const TagAndTmpl = struct { tag: Zir.Inst.Extended, tmpl: u32 }; + const tag_and_tmpl: TagAndTmpl = switch (node_tags[full.ast.template]) { + .string_literal => .{ + .tag = .@"asm", + .tmpl = (try astgen.strLitAsString(main_tokens[full.ast.template])).index, + }, + .multiline_string_literal => .{ + .tag = .@"asm", + .tmpl = (try astgen.strLitNodeAsString(full.ast.template)).index, + }, + else => .{ + .tag = .asm_expr, + .tmpl = @enumToInt(try comptimeExpr(gz, scope, .{ .rl = .none }, full.ast.template)), + }, + }; + + // See https://github.com/ziglang/zig/issues/215 and related issues discussing + // possible inline assembly improvements. Until then here is status quo AstGen + // for assembly syntax. It's used by std lib crypto aesni.zig. + const is_container_asm = astgen.fn_block == null; + if (is_container_asm) { + if (full.volatile_token) |t| + return astgen.failTok(t, "volatile is meaningless on global assembly", .{}); + if (full.outputs.len != 0 or full.inputs.len != 0 or full.first_clobber != null) + return astgen.failNode(node, "global assembly cannot have inputs, outputs, or clobbers", .{}); + } else { + if (full.outputs.len == 0 and full.volatile_token == null) { + return astgen.failNode(node, "assembly expression with no output must be marked volatile", .{}); + } + } + if (full.outputs.len > 32) { + return astgen.failNode(full.outputs[32], "too many asm outputs", .{}); + } + var outputs_buffer: [32]Zir.Inst.Asm.Output = undefined; + const outputs = outputs_buffer[0..full.outputs.len]; + + var output_type_bits: u32 = 0; + + for (full.outputs, 0..) |output_node, i| { + const symbolic_name = main_tokens[output_node]; + const name = try astgen.identAsString(symbolic_name); + const constraint_token = symbolic_name + 2; + const constraint = (try astgen.strLitAsString(constraint_token)).index; + const has_arrow = token_tags[symbolic_name + 4] == .arrow; + if (has_arrow) { + if (output_type_bits != 0) { + return astgen.failNode(output_node, "inline assembly allows up to one output value", .{}); + } + output_type_bits |= @as(u32, 1) << @intCast(u5, i); + const out_type_node = node_datas[output_node].lhs; + const out_type_inst = try typeExpr(gz, scope, out_type_node); + outputs[i] = .{ + .name = name, + .constraint = constraint, + .operand = out_type_inst, + }; + } else { + const ident_token = symbolic_name + 4; + // TODO have a look at #215 and related issues and decide how to + // handle outputs. Do we want this to be identifiers? + // Or maybe we want to force this to be expressions with a pointer type. 
+ outputs[i] = .{ + .name = name, + .constraint = constraint, + .operand = try localVarRef(gz, scope, .{ .rl = .ref }, node, ident_token), + }; + } + } + + if (full.inputs.len > 32) { + return astgen.failNode(full.inputs[32], "too many asm inputs", .{}); + } + var inputs_buffer: [32]Zir.Inst.Asm.Input = undefined; + const inputs = inputs_buffer[0..full.inputs.len]; + + for (full.inputs, 0..) |input_node, i| { + const symbolic_name = main_tokens[input_node]; + const name = try astgen.identAsString(symbolic_name); + const constraint_token = symbolic_name + 2; + const constraint = (try astgen.strLitAsString(constraint_token)).index; + const operand = try expr(gz, scope, .{ .rl = .none }, node_datas[input_node].lhs); + inputs[i] = .{ + .name = name, + .constraint = constraint, + .operand = operand, + }; + } + + var clobbers_buffer: [32]u32 = undefined; + var clobber_i: usize = 0; + if (full.first_clobber) |first_clobber| clobbers: { + // asm ("foo" ::: "a", "b") + // asm ("foo" ::: "a", "b",) + var tok_i = first_clobber; + while (true) : (tok_i += 1) { + if (clobber_i >= clobbers_buffer.len) { + return astgen.failTok(tok_i, "too many asm clobbers", .{}); + } + clobbers_buffer[clobber_i] = (try astgen.strLitAsString(tok_i)).index; + clobber_i += 1; + tok_i += 1; + switch (token_tags[tok_i]) { + .r_paren => break :clobbers, + .comma => { + if (token_tags[tok_i + 1] == .r_paren) { + break :clobbers; + } else { + continue; + } + }, + else => unreachable, + } + } + } + + const result = try gz.addAsm(.{ + .tag = tag_and_tmpl.tag, + .node = node, + .asm_source = tag_and_tmpl.tmpl, + .is_volatile = full.volatile_token != null, + .output_type_bits = output_type_bits, + .outputs = outputs, + .inputs = inputs, + .clobbers = clobbers_buffer[0..clobber_i], + }); + return rvalue(gz, ri, result, node); +} + +fn as( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs: Ast.Node.Index, + rhs: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const dest_type = try typeExpr(gz, scope, lhs); + switch (ri.rl) { + .none, .discard, .ref, .ty, .coerced_ty => { + const result = try reachableExpr(gz, scope, .{ .rl = .{ .ty = dest_type } }, rhs, node); + return rvalue(gz, ri, result, node); + }, + .ptr => |result_ptr| { + return asRlPtr(gz, scope, ri, node, result_ptr.inst, rhs, dest_type); + }, + .inferred_ptr => |result_ptr| { + return asRlPtr(gz, scope, ri, node, result_ptr, rhs, dest_type); + }, + .block_ptr => |block_scope| { + return asRlPtr(gz, scope, ri, node, block_scope.rl_ptr, rhs, dest_type); + }, + } +} + +fn unionInit( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + params: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const union_type = try typeExpr(gz, scope, params[0]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{ + .container_type = union_type, + .field_name = field_name, + }); + const init = try reachableExpr(gz, scope, .{ .rl = .{ .ty = field_type } }, params[2], node); + const result = try gz.addPlNode(.union_init, node, Zir.Inst.UnionInit{ + .union_type = union_type, + .init = init, + .field_name = field_name, + }); + return rvalue(gz, ri, result, node); +} + +fn asRlPtr( + parent_gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + src_node: Ast.Node.Index, + result_ptr: Zir.Inst.Ref, + operand_node: Ast.Node.Index, + dest_type: Zir.Inst.Ref, +) InnerError!Zir.Inst.Ref { + var as_scope = try 
parent_gz.makeCoercionScope(scope, dest_type, result_ptr, src_node); + defer as_scope.unstack(); + + const result = try reachableExpr(&as_scope, &as_scope.base, .{ .rl = .{ .block_ptr = &as_scope } }, operand_node, src_node); + return as_scope.finishCoercion(parent_gz, ri, operand_node, result, dest_type); +} + +fn bitCast( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs: Ast.Node.Index, + rhs: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const dest_type = try reachableTypeExpr(gz, scope, lhs, node); + const operand = try reachableExpr(gz, scope, .{ .rl = .none }, rhs, node); + const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{ + .lhs = dest_type, + .rhs = operand, + }); + return rvalue(gz, ri, result, node); +} + +fn typeOf( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + args: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + if (args.len < 1) { + return astgen.failNode(node, "expected at least 1 argument, found 0", .{}); + } + const gpa = astgen.gpa; + if (args.len == 1) { + const typeof_inst = try gz.makeBlockInst(.typeof_builtin, node); + + var typeof_scope = gz.makeSubBlock(scope); + typeof_scope.force_comptime = false; + typeof_scope.c_import = false; + defer typeof_scope.unstack(); + + const ty_expr = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, args[0], node); + if (!gz.refIsNoReturn(ty_expr)) { + _ = try typeof_scope.addBreak(.break_inline, typeof_inst, ty_expr); + } + try typeof_scope.setBlockBody(typeof_inst); + + // typeof_scope unstacked now, can add new instructions to gz + try gz.instructions.append(gpa, typeof_inst); + return rvalue(gz, ri, indexToRef(typeof_inst), node); + } + const payload_size: u32 = std.meta.fields(Zir.Inst.TypeOfPeer).len; + const payload_index = try reserveExtra(astgen, payload_size + args.len); + var args_index = payload_index + payload_size; + + const typeof_inst = try gz.addExtendedMultiOpPayloadIndex(.typeof_peer, payload_index, args.len); + + var typeof_scope = gz.makeSubBlock(scope); + typeof_scope.force_comptime = false; + + for (args, 0..) |arg, i| { + const param_ref = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, arg, node); + astgen.extra.items[args_index + i] = @enumToInt(param_ref); + } + _ = try typeof_scope.addBreak(.break_inline, refToIndex(typeof_inst).?, .void_value); + + const body = typeof_scope.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + astgen.setExtra(payload_index, Zir.Inst.TypeOfPeer{ + .body_len = @intCast(u32, body_len), + .body_index = @intCast(u32, astgen.extra.items.len), + .src_node = gz.nodeIndexToRelative(node), + }); + try astgen.extra.ensureUnusedCapacity(gpa, body_len); + astgen.appendBodyWithFixups(body); + typeof_scope.unstack(); + + return rvalue(gz, ri, typeof_inst, node); +} + +fn builtinCall( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + params: []const Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const main_tokens = tree.nodes.items(.main_token); + + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + + // We handle the different builtins manually because they have different semantics depending + // on the function. For example, `@as` and others participate in result location semantics, + // and `@cImport` creates a special scope that collects a .c source code text buffer. 
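+ // (The body of `@cImport` is lowered with `gz.c_import` set, so `@cInclude` and
+ // `@cDefine` are only accepted inside it.)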
+ // Also, some builtins have a variable number of parameters. + + const info = BuiltinFn.list.get(builtin_name) orelse { + return astgen.failNode(node, "invalid builtin function: '{s}'", .{ + builtin_name, + }); + }; + if (info.param_count) |expected| { + if (expected != params.len) { + const s = if (expected == 1) "" else "s"; + return astgen.failNode(node, "expected {d} argument{s}, found {d}", .{ + expected, s, params.len, + }); + } + } + + switch (info.tag) { + .import => { + const node_tags = tree.nodes.items(.tag); + const operand_node = params[0]; + + if (node_tags[operand_node] != .string_literal) { + // Spec reference: https://github.com/ziglang/zig/issues/2206 + return astgen.failNode(operand_node, "@import operand must be a string literal", .{}); + } + const str_lit_token = main_tokens[operand_node]; + const str = try astgen.strLitAsString(str_lit_token); + const str_slice = astgen.string_bytes.items[str.index..][0..str.len]; + if (mem.indexOfScalar(u8, str_slice, 0) != null) { + return astgen.failTok(str_lit_token, "import path cannot contain null bytes", .{}); + } else if (str.len == 0) { + return astgen.failTok(str_lit_token, "import path cannot be empty", .{}); + } + const result = try gz.addStrTok(.import, str.index, str_lit_token); + const gop = try astgen.imports.getOrPut(astgen.gpa, str.index); + if (!gop.found_existing) { + gop.value_ptr.* = str_lit_token; + } + return rvalue(gz, ri, result, node); + }, + .compile_log => { + const payload_index = try addExtra(gz.astgen, Zir.Inst.NodeMultiOp{ + .src_node = gz.nodeIndexToRelative(node), + }); + var extra_index = try reserveExtra(gz.astgen, params.len); + for (params) |param| { + const param_ref = try expr(gz, scope, .{ .rl = .none }, param); + astgen.extra.items[extra_index] = @enumToInt(param_ref); + extra_index += 1; + } + const result = try gz.addExtendedMultiOpPayloadIndex(.compile_log, payload_index, params.len); + return rvalue(gz, ri, result, node); + }, + .field => { + if (ri.rl == .ref) { + return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ + .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + }); + } + const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ + .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + }); + return rvalue(gz, ri, result, node); + }, + + // zig fmt: off + .as => return as( gz, scope, ri, node, params[0], params[1]), + .bit_cast => return bitCast( gz, scope, ri, node, params[0], params[1]), + .TypeOf => return typeOf( gz, scope, ri, node, params), + .union_init => return unionInit(gz, scope, ri, node, params), + .c_import => return cImport( gz, scope, node, params[0]), + // zig fmt: on + + .@"export" => { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + // This function causes a Decl to be exported. The first parameter is not an expression, + // but an identifier of the Decl to be exported. 
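+ // e.g. `@export(foo, .{ .name = "foo" });` where `foo` names a local constant,
+ // a container-level declaration, or a field access such as `Namespace.foo`.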
+ var namespace: Zir.Inst.Ref = .none; + var decl_name: u32 = 0; + switch (node_tags[params[0]]) { + .identifier => { + const ident_token = main_tokens[params[0]]; + if (isPrimitive(tree.tokenSlice(ident_token))) { + return astgen.failTok(ident_token, "unable to export primitive value", .{}); + } + decl_name = try astgen.identAsString(ident_token); + + var s = scope; + var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + if (local_val.name == decl_name) { + local_val.used = ident_token; + _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ + .operand = local_val.inst, + .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), + }); + return rvalue(gz, ri, .void_value, node); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == decl_name) { + if (!local_ptr.maybe_comptime) + return astgen.failNode(params[0], "unable to export runtime-known value", .{}); + local_ptr.used = ident_token; + const loaded = try gz.addUnNode(.load, local_ptr.ptr, node); + _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ + .operand = loaded, + .options = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .export_options_type } }, params[1]), + }); + return rvalue(gz, ri, .void_value, node); + } + s = local_ptr.parent; + }, + .gen_zir => s = s.cast(GenZir).?.parent, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .namespace, .enum_namespace => { + const ns = s.cast(Scope.Namespace).?; + if (ns.decls.get(decl_name)) |i| { + if (found_already) |f| { + return astgen.failNodeNotes(node, "ambiguous reference", .{}, &.{ + try astgen.errNoteNode(f, "declared here", .{}), + try astgen.errNoteNode(i, "also declared here", .{}), + }); + } + // We found a match but must continue looking for ambiguous references to decls. 
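+ // If an outer namespace also declares this name, the "ambiguous reference"
+ // error above is reported instead of exporting either declaration.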
+ found_already = i; + } + s = ns.parent; + }, + .top => break, + }; + }, + .field_access => { + const namespace_node = node_datas[params[0]].lhs; + namespace = try typeExpr(gz, scope, namespace_node); + const dot_token = main_tokens[params[0]]; + const field_ident = dot_token + 1; + decl_name = try astgen.identAsString(field_ident); + }, + else => return astgen.failNode(params[0], "symbol to export must identify a declaration", .{}), + } + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .export_options_type } }, params[1]); + _ = try gz.addPlNode(.@"export", node, Zir.Inst.Export{ + .namespace = namespace, + .decl_name = decl_name, + .options = options, + }); + return rvalue(gz, ri, .void_value, node); + }, + .@"extern" => { + const type_inst = try typeExpr(gz, scope, params[0]); + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .extern_options_type } }, params[1]); + const result = try gz.addExtendedPayload(.builtin_extern, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = type_inst, + .rhs = options, + }); + return rvalue(gz, ri, result, node); + }, + .fence => { + const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[0]); + _ = try gz.addExtendedPayload(.fence, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = order, + }); + return rvalue(gz, ri, .void_value, node); + }, + .set_float_mode => { + const order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .float_mode_type } }, params[0]); + _ = try gz.addExtendedPayload(.set_float_mode, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = order, + }); + return rvalue(gz, ri, .void_value, node); + }, + .set_align_stack => { + const order = try expr(gz, scope, align_ri, params[0]); + _ = try gz.addExtendedPayload(.set_align_stack, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = order, + }); + return rvalue(gz, ri, .void_value, node); + }, + .set_cold => { + const order = try expr(gz, scope, ri, params[0]); + _ = try gz.addExtendedPayload(.set_cold, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = order, + }); + return rvalue(gz, ri, .void_value, node); + }, + + .src => { + const token_starts = tree.tokens.items(.start); + const node_start = token_starts[tree.firstToken(node)]; + astgen.advanceSourceCursor(node_start); + const result = try gz.addExtendedPayload(.builtin_src, Zir.Inst.Src{ + .node = gz.nodeIndexToRelative(node), + .line = astgen.source_line, + .column = astgen.source_column, + }); + return rvalue(gz, ri, result, node); + }, + + // zig fmt: off + .This => return rvalue(gz, ri, try gz.addNodeExtended(.this, node), node), + .return_address => return rvalue(gz, ri, try gz.addNodeExtended(.ret_addr, node), node), + .error_return_trace => return rvalue(gz, ri, try gz.addNodeExtended(.error_return_trace, node), node), + .frame => return rvalue(gz, ri, try gz.addNodeExtended(.frame, node), node), + .frame_address => return rvalue(gz, ri, try gz.addNodeExtended(.frame_address, node), node), + .breakpoint => return rvalue(gz, ri, try gz.addNodeExtended(.breakpoint, node), node), + + .type_info => return simpleUnOpType(gz, scope, ri, node, params[0], .type_info), + .size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .size_of), + .bit_size_of => return simpleUnOpType(gz, scope, ri, node, params[0], .bit_size_of), + .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), + + .ptr_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = 
.none }, params[0], .ptr_to_int), + .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .compile_error), + .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), + .enum_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .enum_to_int), + .bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int), + .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file), + .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name), + .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety), + .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), + .sin => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sin), + .cos => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .cos), + .tan => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tan), + .exp => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp), + .exp2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .exp2), + .log => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log), + .log2 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log2), + .log10 => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .log10), + .fabs => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .fabs), + .floor => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .floor), + .ceil => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ceil), + .trunc => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .trunc), + .round => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .round), + .tag_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .tag_name), + .type_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .type_name), + .Frame => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_type), + .frame_size => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .frame_size), + + .float_to_int => return typeCast(gz, scope, ri, node, params[0], params[1], .float_to_int), + .int_to_float => return typeCast(gz, scope, ri, node, params[0], params[1], .int_to_float), + .int_to_ptr => return typeCast(gz, scope, ri, node, params[0], params[1], .int_to_ptr), + .int_to_enum => return typeCast(gz, scope, ri, node, params[0], params[1], .int_to_enum), + .float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast), + .int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast), + .ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast), + .truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate), + // zig fmt: on + + .Type => { + const operand = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .type_info_type } }, params[0]); + + const gpa = gz.astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const payload_index = try gz.astgen.addExtra(Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + 
.operand = operand, + }); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .reify, + .small = @enumToInt(gz.anon_name_strategy), + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + const result = indexToRef(new_index); + return rvalue(gz, ri, result, node); + }, + .panic => { + try emitDbgNode(gz, node); + return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], if (gz.force_comptime) .panic_comptime else .panic); + }, + .trap => { + try emitDbgNode(gz, node); + _ = try gz.addNode(.trap, node); + return rvalue(gz, ri, .void_value, node); + }, + .error_to_int => { + const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); + const result = try gz.addExtendedPayload(.error_to_int, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .int_to_error => { + const operand = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u16_type } }, params[0]); + const result = try gz.addExtendedPayload(.int_to_error, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .align_cast => { + const dest_align = try comptimeExpr(gz, scope, align_ri, params[0]); + const rhs = try expr(gz, scope, .{ .rl = .none }, params[1]); + const result = try gz.addPlNode(.align_cast, node, Zir.Inst.Bin{ + .lhs = dest_align, + .rhs = rhs, + }); + return rvalue(gz, ri, result, node); + }, + .err_set_cast => { + try emitDbgNode(gz, node); + + const result = try gz.addExtendedPayload(.err_set_cast, Zir.Inst.BinNode{ + .lhs = try typeExpr(gz, scope, params[0]), + .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]), + .node = gz.nodeIndexToRelative(node), + }); + return rvalue(gz, ri, result, node); + }, + .addrspace_cast => { + const result = try gz.addExtendedPayload(.addrspace_cast, Zir.Inst.BinNode{ + .lhs = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .address_space_type } }, params[0]), + .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]), + .node = gz.nodeIndexToRelative(node), + }); + return rvalue(gz, ri, result, node); + }, + .const_cast => { + const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); + const result = try gz.addExtendedPayload(.const_cast, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .volatile_cast => { + const operand = try expr(gz, scope, .{ .rl = .none }, params[0]); + const result = try gz.addExtendedPayload(.volatile_cast, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + + // zig fmt: off + .has_decl => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_decl), + .has_field => return hasDeclOrField(gz, scope, ri, node, params[0], params[1], .has_field), + + .clz => return bitBuiltin(gz, scope, ri, node, params[0], .clz), + .ctz => return bitBuiltin(gz, scope, ri, node, params[0], .ctz), + .pop_count => return bitBuiltin(gz, scope, ri, node, params[0], .pop_count), + .byte_swap => return bitBuiltin(gz, scope, ri, node, params[0], .byte_swap), + .bit_reverse => return bitBuiltin(gz, scope, ri, node, params[0], .bit_reverse), + + .div_exact => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_exact), + .div_floor => return 
divBuiltin(gz, scope, ri, node, params[0], params[1], .div_floor), + .div_trunc => return divBuiltin(gz, scope, ri, node, params[0], params[1], .div_trunc), + .mod => return divBuiltin(gz, scope, ri, node, params[0], params[1], .mod), + .rem => return divBuiltin(gz, scope, ri, node, params[0], params[1], .rem), + + .shl_exact => return shiftOp(gz, scope, ri, node, params[0], params[1], .shl_exact), + .shr_exact => return shiftOp(gz, scope, ri, node, params[0], params[1], .shr_exact), + + .bit_offset_of => return offsetOf(gz, scope, ri, node, params[0], params[1], .bit_offset_of), + .offset_of => return offsetOf(gz, scope, ri, node, params[0], params[1], .offset_of), + + .c_undef => return simpleCBuiltin(gz, scope, ri, node, params[0], .c_undef), + .c_include => return simpleCBuiltin(gz, scope, ri, node, params[0], .c_include), + + .cmpxchg_strong => return cmpxchg(gz, scope, ri, node, params, 1), + .cmpxchg_weak => return cmpxchg(gz, scope, ri, node, params, 0), + // zig fmt: on + + .wasm_memory_size => { + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const result = try gz.addExtendedPayload(.wasm_memory_size, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .wasm_memory_grow => { + const index_arg = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const delta_arg = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[1]); + const result = try gz.addExtendedPayload(.wasm_memory_grow, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = index_arg, + .rhs = delta_arg, + }); + return rvalue(gz, ri, result, node); + }, + .c_define => { + if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0]); + const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); + const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = name, + .rhs = value, + }); + return rvalue(gz, ri, result, node); + }, + + .splat => { + const len = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const scalar = try expr(gz, scope, .{ .rl = .none }, params[1]); + const result = try gz.addPlNode(.splat, node, Zir.Inst.Bin{ + .lhs = len, + .rhs = scalar, + }); + return rvalue(gz, ri, result, node); + }, + .reduce => { + const op = try expr(gz, scope, .{ .rl = .{ .ty = .reduce_op_type } }, params[0]); + const scalar = try expr(gz, scope, .{ .rl = .none }, params[1]); + const result = try gz.addPlNode(.reduce, node, Zir.Inst.Bin{ + .lhs = op, + .rhs = scalar, + }); + return rvalue(gz, ri, result, node); + }, + + .max => { + const a = try expr(gz, scope, .{ .rl = .none }, params[0]); + const b = try expr(gz, scope, .{ .rl = .none }, params[1]); + const result = try gz.addPlNode(.max, node, Zir.Inst.Bin{ + .lhs = a, + .rhs = b, + }); + return rvalue(gz, ri, result, node); + }, + .min => { + const a = try expr(gz, scope, .{ .rl = .none }, params[0]); + const b = try expr(gz, scope, .{ .rl = .none }, params[1]); + const result = try gz.addPlNode(.min, node, Zir.Inst.Bin{ + .lhs = a, + .rhs = b, + }); + return rvalue(gz, ri, result, node); + }, + + .add_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .add_with_overflow), + .sub_with_overflow => return overflowArithmetic(gz, scope, 
ri, node, params, .sub_with_overflow), + .mul_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .mul_with_overflow), + .shl_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .shl_with_overflow), + + .atomic_load => { + const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.AtomicLoad{ + // zig fmt: off + .elem_type = try typeExpr(gz, scope, params[0]), + .ptr = try expr (gz, scope, .{ .rl = .none }, params[1]), + .ordering = try expr (gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[2]), + // zig fmt: on + }); + return rvalue(gz, ri, result, node); + }, + .atomic_rmw => { + const int_type = try typeExpr(gz, scope, params[0]); + const result = try gz.addPlNode(.atomic_rmw, node, Zir.Inst.AtomicRmw{ + // zig fmt: off + .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .operation = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_rmw_op_type } }, params[2]), + .operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[3]), + .ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[4]), + // zig fmt: on + }); + return rvalue(gz, ri, result, node); + }, + .atomic_store => { + const int_type = try typeExpr(gz, scope, params[0]); + _ = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{ + // zig fmt: off + .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .operand = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]), + .ordering = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[3]), + // zig fmt: on + }); + return rvalue(gz, ri, .void_value, node); + }, + .mul_add => { + const float_type = try typeExpr(gz, scope, params[0]); + const mulend1 = try expr(gz, scope, .{ .rl = .{ .coerced_ty = float_type } }, params[1]); + const mulend2 = try expr(gz, scope, .{ .rl = .{ .coerced_ty = float_type } }, params[2]); + const addend = try expr(gz, scope, .{ .rl = .{ .ty = float_type } }, params[3]); + const result = try gz.addPlNode(.mul_add, node, Zir.Inst.MulAdd{ + .mulend1 = mulend1, + .mulend2 = mulend2, + .addend = addend, + }); + return rvalue(gz, ri, result, node); + }, + .call => { + const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .modifier_type } }, params[0]); + const callee = try calleeExpr(gz, scope, params[1]); + const args = try expr(gz, scope, .{ .rl = .none }, params[2]); + const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ + .modifier = modifier, + .callee = callee, + .args = args, + .flags = .{ + .is_nosuspend = gz.nosuspend_node != 0, + .is_comptime = gz.force_comptime, + .ensure_result_used = false, + }, + }); + return rvalue(gz, ri, result, node); + }, + .field_parent_ptr => { + const parent_type = try typeExpr(gz, scope, params[0]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ + .parent_type = parent_type, + .field_name = field_name, + .field_ptr = try expr(gz, scope, .{ .rl = .none }, params[2]), + }); + return rvalue(gz, ri, result, node); + }, + .memcpy => { + _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{ + .dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]), + .source = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_const_u8_type } }, params[1]), + .byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]), + }); + return rvalue(gz, ri, 
.void_value, node); + }, + .memset => { + _ = try gz.addPlNode(.memset, node, Zir.Inst.Memset{ + .dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]), + .byte = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u8_type } }, params[1]), + .byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]), + }); + return rvalue(gz, ri, .void_value, node); + }, + .shuffle => { + const result = try gz.addPlNode(.shuffle, node, Zir.Inst.Shuffle{ + .elem_type = try typeExpr(gz, scope, params[0]), + .a = try expr(gz, scope, .{ .rl = .none }, params[1]), + .b = try expr(gz, scope, .{ .rl = .none }, params[2]), + .mask = try comptimeExpr(gz, scope, .{ .rl = .none }, params[3]), + }); + return rvalue(gz, ri, result, node); + }, + .select => { + const result = try gz.addExtendedPayload(.select, Zir.Inst.Select{ + .node = gz.nodeIndexToRelative(node), + .elem_type = try typeExpr(gz, scope, params[0]), + .pred = try expr(gz, scope, .{ .rl = .none }, params[1]), + .a = try expr(gz, scope, .{ .rl = .none }, params[2]), + .b = try expr(gz, scope, .{ .rl = .none }, params[3]), + }); + return rvalue(gz, ri, result, node); + }, + .async_call => { + const result = try gz.addExtendedPayload(.builtin_async_call, Zir.Inst.AsyncCall{ + .node = gz.nodeIndexToRelative(node), + .frame_buffer = try expr(gz, scope, .{ .rl = .none }, params[0]), + .result_ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .fn_ptr = try expr(gz, scope, .{ .rl = .none }, params[2]), + .args = try expr(gz, scope, .{ .rl = .none }, params[3]), + }); + return rvalue(gz, ri, result, node); + }, + .Vector => { + const result = try gz.addPlNode(.vector_type, node, Zir.Inst.Bin{ + .lhs = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]), + .rhs = try typeExpr(gz, scope, params[1]), + }); + return rvalue(gz, ri, result, node); + }, + .prefetch => { + const ptr = try expr(gz, scope, .{ .rl = .none }, params[0]); + const options = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .prefetch_options_type } }, params[1]); + _ = try gz.addExtendedPayload(.prefetch, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = ptr, + .rhs = options, + }); + return rvalue(gz, ri, .void_value, node); + }, + .c_va_arg => { + if (astgen.fn_block == null) { + return astgen.failNode(node, "'@cVaArg' outside function scope", .{}); + } + const result = try gz.addExtendedPayload(.c_va_arg, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), + .rhs = try typeExpr(gz, scope, params[1]), + }); + return rvalue(gz, ri, result, node); + }, + .c_va_copy => { + if (astgen.fn_block == null) { + return astgen.failNode(node, "'@cVaCopy' outside function scope", .{}); + } + const result = try gz.addExtendedPayload(.c_va_copy, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = try expr(gz, scope, .{ .rl = .none }, params[0]), + }); + return rvalue(gz, ri, result, node); + }, + .c_va_end => { + if (astgen.fn_block == null) { + return astgen.failNode(node, "'@cVaEnd' outside function scope", .{}); + } + const result = try gz.addExtendedPayload(.c_va_end, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = try expr(gz, scope, .{ .rl = .none }, params[0]), + }); + return rvalue(gz, ri, result, node); + }, + .c_va_start => { + if (astgen.fn_block == null) { + return astgen.failNode(node, "'@cVaStart' outside function scope", .{}); + } + if (!astgen.fn_var_args) { + return 
astgen.failNode(node, "'@cVaStart' in a non-variadic function", .{}); + } + return rvalue(gz, ri, try gz.addNodeExtended(.c_va_start, node), node); + }, + + .work_item_id => { + if (astgen.fn_block == null) { + return astgen.failNode(node, "'@workItemId' outside function scope", .{}); + } + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const result = try gz.addExtendedPayload(.work_item_id, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .work_group_size => { + if (astgen.fn_block == null) { + return astgen.failNode(node, "'@workGroupSize' outside function scope", .{}); + } + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const result = try gz.addExtendedPayload(.work_group_size, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + .work_group_id => { + if (astgen.fn_block == null) { + return astgen.failNode(node, "'@workGroupId' outside function scope", .{}); + } + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0]); + const result = try gz.addExtendedPayload(.work_group_id, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, result, node); + }, + } +} + +fn hasDeclOrField( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const container_type = try typeExpr(gz, scope, lhs_node); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = container_type, + .rhs = name, + }); + return rvalue(gz, ri, result, node); +} + +fn typeCast( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + try emitDbgNode(gz, node); + + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = try typeExpr(gz, scope, lhs_node), + .rhs = try expr(gz, scope, .{ .rl = .none }, rhs_node), + }); + return rvalue(gz, ri, result, node); +} + +fn simpleUnOpType( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const operand = try typeExpr(gz, scope, operand_node); + const result = try gz.addUnNode(tag, operand, node); + return rvalue(gz, ri, result, node); +} + +fn simpleUnOp( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_ri: ResultInfo, + operand_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const prev_force_comptime = gz.force_comptime; + defer gz.force_comptime = prev_force_comptime; + + switch (tag) { + .tag_name, .error_name, .ptr_to_int => try emitDbgNode(gz, node), + .compile_error => gz.force_comptime = true, + else => {}, + } + const operand = try expr(gz, scope, operand_ri, operand_node); + const result = try gz.addUnNode(tag, operand, node); + return rvalue(gz, ri, result, node); +} + +fn negation( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const 
node_datas = tree.nodes.items(.data); + + // Check for float literal as the sub-expression because we want to preserve + // its negativity rather than having it go through comptime subtraction. + const operand_node = node_datas[node].lhs; + if (node_tags[operand_node] == .number_literal) { + return numberLiteral(gz, ri, operand_node, node, .negative); + } + + const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); + const result = try gz.addUnNode(.negate, operand, node); + return rvalue(gz, ri, result, node); +} + +fn cmpxchg( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + params: []const Ast.Node.Index, + small: u16, +) InnerError!Zir.Inst.Ref { + const int_type = try typeExpr(gz, scope, params[0]); + const result = try gz.addExtendedPayloadSmall(.cmpxchg, small, Zir.Inst.Cmpxchg{ + // zig fmt: off + .node = gz.nodeIndexToRelative(node), + .ptr = try expr(gz, scope, .{ .rl = .none }, params[1]), + .expected_value = try expr(gz, scope, .{ .rl = .{ .ty = int_type } }, params[2]), + .new_value = try expr(gz, scope, .{ .rl = .{ .coerced_ty = int_type } }, params[3]), + .success_order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[4]), + .failure_order = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .atomic_order_type } }, params[5]), + // zig fmt: on + }); + return rvalue(gz, ri, result, node); +} + +fn bitBuiltin( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const operand = try expr(gz, scope, .{ .rl = .none }, operand_node); + const result = try gz.addUnNode(tag, operand, node); + return rvalue(gz, ri, result, node); +} + +fn divBuiltin( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + try emitDbgNode(gz, node); + + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node), + .rhs = try expr(gz, scope, .{ .rl = .none }, rhs_node), + }); + return rvalue(gz, ri, result, node); +} + +fn simpleCBuiltin( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + operand_node: Ast.Node.Index, + tag: Zir.Inst.Extended, +) InnerError!Zir.Inst.Ref { + const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; + if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, operand_node); + _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ + .node = gz.nodeIndexToRelative(node), + .operand = operand, + }); + return rvalue(gz, ri, .void_value, node); +} + +fn offsetOf( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + const type_inst = try typeExpr(gz, scope, lhs_node); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = type_inst, + .rhs = field_name, + }); + return rvalue(gz, ri, result, node); +} + +fn shiftOp( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Tag, +) InnerError!Zir.Inst.Ref { + var line = gz.astgen.source_line - 
gz.decl_line; + var column = gz.astgen.source_column; + const lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node); + + switch (gz.astgen.tree.nodes.items(.tag)[node]) { + .shl, .shr => { + maybeAdvanceSourceCursorToMainToken(gz, node); + line = gz.astgen.source_line - gz.decl_line; + column = gz.astgen.source_column; + }, + else => {}, + } + + const log2_int_type = try gz.addUnNode(.typeof_log2_int_type, lhs, lhs_node); + const rhs = try expr(gz, scope, .{ .rl = .{ .ty = log2_int_type }, .ctx = .shift_op }, rhs_node); + + try emitDbgStmt(gz, line, column); + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + }); + return rvalue(gz, ri, result, node); +} + +fn cImport( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + body_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + if (gz.c_import) return gz.astgen.failNode(node, "cannot nest @cImport", .{}); + + var block_scope = gz.makeSubBlock(scope); + block_scope.force_comptime = true; + block_scope.c_import = true; + defer block_scope.unstack(); + + const block_inst = try gz.makeBlockInst(.c_import, node); + const block_result = try expr(&block_scope, &block_scope.base, .{ .rl = .none }, body_node); + _ = try gz.addUnNode(.ensure_result_used, block_result, node); + if (!gz.refIsNoReturn(block_result)) { + _ = try block_scope.addBreak(.break_inline, block_inst, .void_value); + } + try block_scope.setBlockBody(block_inst); + // block_scope unstacked now, can add new instructions to gz + try gz.instructions.append(gpa, block_inst); + + return indexToRef(block_inst); +} + +fn overflowArithmetic( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + params: []const Ast.Node.Index, + tag: Zir.Inst.Extended, +) InnerError!Zir.Inst.Ref { + const lhs = try expr(gz, scope, .{ .rl = .none }, params[0]); + const rhs = try expr(gz, scope, .{ .rl = .none }, params[1]); + const result = try gz.addExtendedPayload(tag, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = lhs, + .rhs = rhs, + }); + return rvalue(gz, ri, result, node); +} + +fn callExpr( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + call: Ast.full.Call, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + + const callee = try calleeExpr(gz, scope, call.ast.fn_expr); + const modifier: std.builtin.CallModifier = blk: { + if (gz.force_comptime) { + break :blk .compile_time; + } + if (call.async_token != null) { + break :blk .async_kw; + } + if (gz.nosuspend_node != 0) { + break :blk .no_async; + } + break :blk .auto; + }; + + { + astgen.advanceSourceCursor(astgen.tree.tokens.items(.start)[call.ast.lparen]); + const line = astgen.source_line - gz.decl_line; + const column = astgen.source_column; + + _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ + .dbg_stmt = .{ + .line = line, + .column = column, + }, + } }); + } + + assert(callee != .none); + assert(node != 0); + + const call_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + const call_inst = Zir.indexToRef(call_index); + try gz.astgen.instructions.append(astgen.gpa, undefined); + try gz.instructions.append(astgen.gpa, call_index); + + const scratch_top = astgen.scratch.items.len; + defer astgen.scratch.items.len = scratch_top; + + var scratch_index = scratch_top; + try astgen.scratch.resize(astgen.gpa, scratch_top + call.ast.params.len); + + for (call.ast.params) |param_node| { + var arg_block = gz.makeSubBlock(scope); + defer arg_block.unstack(); + + // 
`call_inst` is reused to provide the param type. + arg_block.rl_ty_inst = call_inst; + const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node); + _ = try arg_block.addBreakWithSrcNode(.break_inline, call_index, arg_ref, param_node); + + const body = arg_block.instructionsSlice(); + try astgen.scratch.ensureUnusedCapacity(astgen.gpa, countBodyLenAfterFixups(astgen, body)); + appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body); + + astgen.scratch.items[scratch_index] = @intCast(u32, astgen.scratch.items.len - scratch_top); + scratch_index += 1; + } + + // If our result location is a try/catch/error-union-if/return, a function argument, + // or an initializer for a `const` variable, the error trace propagates. + // Otherwise, it should always be popped (handled in Sema). + const propagate_error_trace = switch (ri.ctx) { + .error_handling_expr, .@"return", .fn_arg, .const_init => true, + else => false, + }; + + const payload_index = try addExtra(astgen, Zir.Inst.Call{ + .callee = callee, + .flags = .{ + .pop_error_return_trace = !propagate_error_trace, + .packed_modifier = @intCast(Zir.Inst.Call.Flags.PackedModifier, @enumToInt(modifier)), + .args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len), + }, + }); + if (call.ast.params.len != 0) { + try astgen.extra.appendSlice(astgen.gpa, astgen.scratch.items[scratch_top..]); + } + gz.astgen.instructions.set(call_index, .{ + .tag = .call, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(node), + .payload_index = payload_index, + } }, + }); + return rvalue(gz, ri, call_inst, node); // TODO function call with result location +} + +/// calleeExpr generates the function part of a call expression (f in f(x)), or the +/// callee argument to the @call() builtin. If the lhs is a field access or the +/// @field() builtin, we need to generate a special field_call_bind instruction +/// instead of the normal field_val or field_ptr. If this is a inst.func() call, +/// this instruction will capture the value of the first argument before evaluating +/// the other arguments. We need to use .ref here to guarantee we will be able to +/// promote an lvalue to an address if the first parameter requires it. This +/// unfortunately also means we need to take a reference to any types on the lhs. 
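+/// For example, in `foo.bar(x)` the callee `foo.bar` is lowered here to a
+/// `field_call_bind` whose lhs is a `.ref` of `foo`, so `foo` is evaluated
+/// once and can be passed by pointer if `bar`'s first parameter requires it.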
+fn calleeExpr( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const tree = astgen.tree; + + const tag = tree.nodes.items(.tag)[node]; + switch (tag) { + .field_access => return addFieldAccess(.field_call_bind, gz, scope, .{ .rl = .ref }, node), + + .builtin_call_two, + .builtin_call_two_comma, + .builtin_call, + .builtin_call_comma, + => { + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + + var inline_params: [2]Ast.Node.Index = undefined; + var params: []Ast.Node.Index = switch (tag) { + .builtin_call, + .builtin_call_comma, + => tree.extra_data[node_datas[node].lhs..node_datas[node].rhs], + + .builtin_call_two, + .builtin_call_two_comma, + => blk: { + inline_params = .{ node_datas[node].lhs, node_datas[node].rhs }; + const len: usize = if (inline_params[0] == 0) @as(usize, 0) else if (inline_params[1] == 0) @as(usize, 1) else @as(usize, 2); + break :blk inline_params[0..len]; + }, + + else => unreachable, + }; + + // If anything is wrong, fall back to builtinCall. + // It will emit any necessary compile errors and notes. + if (std.mem.eql(u8, builtin_name, "@field") and params.len == 2) { + const lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + return gz.addExtendedPayload(.field_call_bind_named, Zir.Inst.FieldNamedNode{ + .node = gz.nodeIndexToRelative(node), + .lhs = lhs, + .field_name = field_name, + }); + } + + return builtinCall(gz, scope, .{ .rl = .none }, node, params); + }, + else => return expr(gz, scope, .{ .rl = .none }, node), + } +} + +const primitive_instrs = std.ComptimeStringMap(Zir.Inst.Ref, .{ + .{ "anyerror", .anyerror_type }, + .{ "anyframe", .anyframe_type }, + .{ "anyopaque", .anyopaque_type }, + .{ "bool", .bool_type }, + .{ "c_int", .c_int_type }, + .{ "c_long", .c_long_type }, + .{ "c_longdouble", .c_longdouble_type }, + .{ "c_longlong", .c_longlong_type }, + .{ "c_short", .c_short_type }, + .{ "c_uint", .c_uint_type }, + .{ "c_ulong", .c_ulong_type }, + .{ "c_ulonglong", .c_ulonglong_type }, + .{ "c_ushort", .c_ushort_type }, + .{ "comptime_float", .comptime_float_type }, + .{ "comptime_int", .comptime_int_type }, + .{ "f128", .f128_type }, + .{ "f16", .f16_type }, + .{ "f32", .f32_type }, + .{ "f64", .f64_type }, + .{ "f80", .f80_type }, + .{ "false", .bool_false }, + .{ "i16", .i16_type }, + .{ "i32", .i32_type }, + .{ "i64", .i64_type }, + .{ "i128", .i128_type }, + .{ "i8", .i8_type }, + .{ "isize", .isize_type }, + .{ "noreturn", .noreturn_type }, + .{ "null", .null_value }, + .{ "true", .bool_true }, + .{ "type", .type_type }, + .{ "u16", .u16_type }, + .{ "u29", .u29_type }, + .{ "u32", .u32_type }, + .{ "u64", .u64_type }, + .{ "u128", .u128_type }, + .{ "u1", .u1_type }, + .{ "u8", .u8_type }, + .{ "undefined", .undef }, + .{ "usize", .usize_type }, + .{ "void", .void_type }, +}); + +comptime { + // These checks ensure that std.zig.primitives stays in sync with the primitive->Zir map. 
+ const primitives = std.zig.primitives; + for (primitive_instrs.kvs) |kv| { + if (!primitives.isPrimitive(kv.key)) { + @compileError("std.zig.isPrimitive() is not aware of Zir instr '" ++ @tagName(kv.value) ++ "'"); + } + } + for (primitives.names.kvs) |kv| { + if (primitive_instrs.get(kv.key) == null) { + @compileError("std.zig.primitives entry '" ++ kv.key ++ "' does not have a corresponding Zir instr"); + } + } +} + +fn nodeIsTriviallyZero(tree: *const Ast, node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + + switch (node_tags[node]) { + .number_literal => { + const ident = main_tokens[node]; + return switch (std.zig.parseNumberLiteral(tree.tokenSlice(ident))) { + .int => |number| switch (number) { + 0 => true, + else => false, + }, + else => false, + }; + }, + else => return false, + } +} + +fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_res_ty: bool) bool { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + .root, + .@"usingnamespace", + .test_decl, + .switch_case, + .switch_case_inline, + .switch_case_one, + .switch_case_inline_one, + .container_field_init, + .container_field_align, + .container_field, + .asm_output, + .asm_input, + => unreachable, + + .@"return", + .@"break", + .@"continue", + .bit_not, + .bool_not, + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + .@"defer", + .@"errdefer", + .address_of, + .optional_type, + .negation, + .negation_wrap, + .@"resume", + .array_type, + .array_type_sentinel, + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + .@"suspend", + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + .fn_decl, + .anyframe_type, + .anyframe_literal, + .number_literal, + .enum_literal, + .string_literal, + .multiline_string_literal, + .char_literal, + .unreachable_literal, + .identifier, + .error_set_decl, + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .@"asm", + .asm_simple, + .add, + .add_wrap, + .add_sat, + .array_cat, + .array_mult, + .assign, + .assign_bit_and, + .assign_bit_or, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + .bang_equal, + .bit_and, + .bit_or, + .shl, + .shl_sat, + .shr, + .bit_xor, + .bool_and, + .bool_or, + .div, + .equal_equal, + .error_union, + .greater_or_equal, + .greater_than, + .less_or_equal, + .less_than, + .merge_error_sets, + .mod, + .mul, + .mul_wrap, + .mul_sat, + .switch_range, + .for_range, + .field_access, + .sub, + .sub_wrap, + .sub_sat, + .slice, + .slice_open, + .slice_sentinel, + .deref, + .array_access, + .error_value, + .while_simple, // This variant cannot have an else expression. + .while_cont, // This variant cannot have an else expression. + .for_simple, // This variant cannot have an else expression. 
+ .if_simple, // This variant cannot have an else expression. + => return false, + + // Forward the question to the LHS sub-expression. + .grouped_expression, + .@"try", + .@"await", + .@"comptime", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + // Forward the question to the RHS sub-expression. + .@"catch", + .@"orelse", + => node = node_datas[node].rhs, + + // Array and struct init exprs write to result locs, but anon literals do not. + .array_init_one, + .array_init_one_comma, + .struct_init_one, + .struct_init_one_comma, + .array_init, + .array_init_comma, + .struct_init, + .struct_init_comma, + => return have_res_ty or node_datas[node].lhs != 0, + + // Anon literals do not need result location. + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + => return have_res_ty, + + // True because depending on comptime conditions, sub-expressions + // may be the kind that need memory locations. + .@"while", // This variant always has an else expression. + .@"if", // This variant always has an else expression. + .@"for", // This variant always has an else expression. + .@"switch", + .switch_comma, + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + => return true, + + .block_two, + .block_two_semicolon, + .block, + .block_semicolon, + => { + const lbrace = main_tokens[node]; + if (token_tags[lbrace - 1] == .colon) { + // Labeled blocks may need a memory location to forward + // to their break statements. + return true; + } else { + return false; + } + }, + + .builtin_call_two, .builtin_call_two_comma => { + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + // If the builtin is an invalid name, we don't cause an error here; instead + // let it pass, and the error will be "invalid builtin function" later. + const builtin_info = BuiltinFn.list.get(builtin_name) orelse return false; + switch (builtin_info.needs_mem_loc) { + .never => return false, + .always => return true, + .forward1 => node = node_datas[node].rhs, + } + // Missing builtin arg is not a parsing error, expect an error later. + if (node == 0) return false; + }, + + .builtin_call, .builtin_call_comma => { + const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs]; + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + // If the builtin is an invalid name, we don't cause an error here; instead + // let it pass, and the error will be "invalid builtin function" later. + const builtin_info = BuiltinFn.list.get(builtin_name) orelse return false; + switch (builtin_info.needs_mem_loc) { + .never => return false, + .always => return true, + .forward1 => node = params[1], + } + // Missing builtin arg is not a parsing error, expect an error later. + if (node == 0) return false; + }, + } + } +} + +fn nodeMayAppendToErrorTrace(tree: *const Ast, start_node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + // These don't have the opportunity to call any runtime functions. + .error_value, + .identifier, + .@"comptime", + => return false, + + // Forward the question to the LHS sub-expression. 
+ .grouped_expression, + .@"try", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + // Anything that does not eval to an error is guaranteed to pop any + // additions to the error trace, so it effectively does not append. + else => return nodeMayEvalToError(tree, start_node) != .never, + } + } +} + +fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.EvalToError { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + .root, + .@"usingnamespace", + .test_decl, + .switch_case, + .switch_case_inline, + .switch_case_one, + .switch_case_inline_one, + .container_field_init, + .container_field_align, + .container_field, + .asm_output, + .asm_input, + => unreachable, + + .error_value => return .always, + + .@"asm", + .asm_simple, + .identifier, + .field_access, + .deref, + .array_access, + .while_simple, + .while_cont, + .for_simple, + .if_simple, + .@"while", + .@"if", + .@"for", + .@"switch", + .switch_comma, + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + => return .maybe, + + .@"return", + .@"break", + .@"continue", + .bit_not, + .bool_not, + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + .@"defer", + .@"errdefer", + .address_of, + .optional_type, + .negation, + .negation_wrap, + .@"resume", + .array_type, + .array_type_sentinel, + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + .@"suspend", + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + .fn_decl, + .anyframe_type, + .anyframe_literal, + .number_literal, + .enum_literal, + .string_literal, + .multiline_string_literal, + .char_literal, + .unreachable_literal, + .error_set_decl, + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .add, + .add_wrap, + .add_sat, + .array_cat, + .array_mult, + .assign, + .assign_bit_and, + .assign_bit_or, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + .bang_equal, + .bit_and, + .bit_or, + .shl, + .shl_sat, + .shr, + .bit_xor, + .bool_and, + .bool_or, + .div, + .equal_equal, + .error_union, + .greater_or_equal, + .greater_than, + .less_or_equal, + .less_than, + .merge_error_sets, + .mod, + .mul, + .mul_wrap, + .mul_sat, + .switch_range, + .for_range, + .sub, + .sub_wrap, + .sub_sat, + .slice, + .slice_open, + .slice_sentinel, + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + .struct_init_one, + .struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + => return .never, + + // Forward the question to the LHS sub-expression. 
+ .grouped_expression, + .@"try", + .@"await", + .@"comptime", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + // LHS sub-expression may still be an error under the outer optional or error union + .@"catch", + .@"orelse", + => return .maybe, + + .block_two, + .block_two_semicolon, + .block, + .block_semicolon, + => { + const lbrace = main_tokens[node]; + if (token_tags[lbrace - 1] == .colon) { + // Labeled blocks may need a memory location to forward + // to their break statements. + return .maybe; + } else { + return .never; + } + }, + + .builtin_call, + .builtin_call_comma, + .builtin_call_two, + .builtin_call_two_comma, + => { + const builtin_token = main_tokens[node]; + const builtin_name = tree.tokenSlice(builtin_token); + // If the builtin is an invalid name, we don't cause an error here; instead + // let it pass, and the error will be "invalid builtin function" later. + const builtin_info = BuiltinFn.list.get(builtin_name) orelse return .maybe; + return builtin_info.eval_to_error; + }, + } + } +} + +/// Returns `true` if it is known the type expression has more than one possible value; +/// `false` otherwise. +fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + .root, + .@"usingnamespace", + .test_decl, + .switch_case, + .switch_case_inline, + .switch_case_one, + .switch_case_inline_one, + .container_field_init, + .container_field_align, + .container_field, + .asm_output, + .asm_input, + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => unreachable, + + .@"return", + .@"break", + .@"continue", + .bit_not, + .bool_not, + .@"defer", + .@"errdefer", + .address_of, + .negation, + .negation_wrap, + .@"resume", + .array_type, + .@"suspend", + .fn_decl, + .anyframe_literal, + .number_literal, + .enum_literal, + .string_literal, + .multiline_string_literal, + .char_literal, + .unreachable_literal, + .error_set_decl, + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .@"asm", + .asm_simple, + .add, + .add_wrap, + .add_sat, + .array_cat, + .array_mult, + .assign, + .assign_bit_and, + .assign_bit_or, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + .bang_equal, + .bit_and, + .bit_or, + .shl, + .shl_sat, + .shr, + .bit_xor, + .bool_and, + .bool_or, + .div, + .equal_equal, + .error_union, + .greater_or_equal, + .greater_than, + .less_or_equal, + .less_than, + .merge_error_sets, + .mod, + .mul, + .mul_wrap, + .mul_sat, + .switch_range, + .for_range, + .field_access, + .sub, + .sub_wrap, + .sub_sat, + .slice, + .slice_open, + .slice_sentinel, + .deref, + .array_access, + .error_value, + .while_simple, + .while_cont, + .for_simple, + .if_simple, + .@"catch", + .@"orelse", + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + .struct_init_one, + 
.struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + .@"while", + .@"if", + .@"for", + .@"switch", + .switch_comma, + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + .block_two, + .block_two_semicolon, + .block, + .block_semicolon, + .builtin_call, + .builtin_call_comma, + .builtin_call_two, + .builtin_call_two_comma, + // these are function bodies, not pointers + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + => return false, + + // Forward the question to the LHS sub-expression. + .grouped_expression, + .@"try", + .@"await", + .@"comptime", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + .optional_type, + .anyframe_type, + .array_type_sentinel, + => return true, + + .identifier => { + const main_tokens = tree.nodes.items(.main_token); + const ident_bytes = tree.tokenSlice(main_tokens[node]); + if (primitive_instrs.get(ident_bytes)) |primitive| switch (primitive) { + .anyerror_type, + .anyframe_type, + .anyopaque_type, + .bool_type, + .c_int_type, + .c_long_type, + .c_longdouble_type, + .c_longlong_type, + .c_short_type, + .c_uint_type, + .c_ulong_type, + .c_ulonglong_type, + .c_ushort_type, + .comptime_float_type, + .comptime_int_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .i16_type, + .i32_type, + .i64_type, + .i128_type, + .i8_type, + .isize_type, + .type_type, + .u16_type, + .u29_type, + .u32_type, + .u64_type, + .u128_type, + .u1_type, + .u8_type, + .usize_type, + => return true, + + .void_type, + .bool_false, + .bool_true, + .null_value, + .undef, + .noreturn_type, + => return false, + + else => unreachable, // that's all the values from `primitives`. + } else { + return false; + } + }, + } + } +} + +/// Returns `true` if it is known the expression is a type that cannot be used at runtime; +/// `false` otherwise. 
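+/// For example, `type` and `comptime_int` imply a comptime-only type, while
+/// `u32`, pointer types, and ordinary runtime expressions do not.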
+fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + + var node = start_node; + while (true) { + switch (node_tags[node]) { + .root, + .@"usingnamespace", + .test_decl, + .switch_case, + .switch_case_inline, + .switch_case_one, + .switch_case_inline_one, + .container_field_init, + .container_field_align, + .container_field, + .asm_output, + .asm_input, + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => unreachable, + + .@"return", + .@"break", + .@"continue", + .bit_not, + .bool_not, + .@"defer", + .@"errdefer", + .address_of, + .negation, + .negation_wrap, + .@"resume", + .array_type, + .@"suspend", + .fn_decl, + .anyframe_literal, + .number_literal, + .enum_literal, + .string_literal, + .multiline_string_literal, + .char_literal, + .unreachable_literal, + .error_set_decl, + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + .@"asm", + .asm_simple, + .add, + .add_wrap, + .add_sat, + .array_cat, + .array_mult, + .assign, + .assign_bit_and, + .assign_bit_or, + .assign_shl, + .assign_shl_sat, + .assign_shr, + .assign_bit_xor, + .assign_div, + .assign_sub, + .assign_sub_wrap, + .assign_sub_sat, + .assign_mod, + .assign_add, + .assign_add_wrap, + .assign_add_sat, + .assign_mul, + .assign_mul_wrap, + .assign_mul_sat, + .bang_equal, + .bit_and, + .bit_or, + .shl, + .shl_sat, + .shr, + .bit_xor, + .bool_and, + .bool_or, + .div, + .equal_equal, + .error_union, + .greater_or_equal, + .greater_than, + .less_or_equal, + .less_than, + .merge_error_sets, + .mod, + .mul, + .mul_wrap, + .mul_sat, + .switch_range, + .for_range, + .field_access, + .sub, + .sub_wrap, + .sub_sat, + .slice, + .slice_open, + .slice_sentinel, + .deref, + .array_access, + .error_value, + .while_simple, + .while_cont, + .for_simple, + .if_simple, + .@"catch", + .@"orelse", + .array_init_one, + .array_init_one_comma, + .array_init_dot_two, + .array_init_dot_two_comma, + .array_init_dot, + .array_init_dot_comma, + .array_init, + .array_init_comma, + .struct_init_one, + .struct_init_one_comma, + .struct_init_dot_two, + .struct_init_dot_two_comma, + .struct_init_dot, + .struct_init_dot_comma, + .struct_init, + .struct_init_comma, + .@"while", + .@"if", + .@"for", + .@"switch", + .switch_comma, + .call_one, + .call_one_comma, + .async_call_one, + .async_call_one_comma, + .call, + .call_comma, + .async_call, + .async_call_comma, + .block_two, + .block_two_semicolon, + .block, + .block_semicolon, + .builtin_call, + .builtin_call_comma, + .builtin_call_two, + .builtin_call_two_comma, + .ptr_type_aligned, + .ptr_type_sentinel, + .ptr_type, + .ptr_type_bit_range, + .optional_type, + .anyframe_type, + .array_type_sentinel, + => return false, + + // these are function bodies, not pointers + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + => return true, + + // Forward the question to the LHS sub-expression. 
+ .grouped_expression, + .@"try", + .@"await", + .@"comptime", + .@"nosuspend", + .unwrap_optional, + => node = node_datas[node].lhs, + + .identifier => { + const main_tokens = tree.nodes.items(.main_token); + const ident_bytes = tree.tokenSlice(main_tokens[node]); + if (primitive_instrs.get(ident_bytes)) |primitive| switch (primitive) { + .anyerror_type, + .anyframe_type, + .anyopaque_type, + .bool_type, + .c_int_type, + .c_long_type, + .c_longdouble_type, + .c_longlong_type, + .c_short_type, + .c_uint_type, + .c_ulong_type, + .c_ulonglong_type, + .c_ushort_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .i16_type, + .i32_type, + .i64_type, + .i128_type, + .i8_type, + .isize_type, + .u16_type, + .u29_type, + .u32_type, + .u64_type, + .u128_type, + .u1_type, + .u8_type, + .usize_type, + .void_type, + .bool_false, + .bool_true, + .null_value, + .undef, + .noreturn_type, + => return false, + + .comptime_float_type, + .comptime_int_type, + .type_type, + => return true, + + else => unreachable, // that's all the values from `primitives`. + } else { + return false; + } + }, + } + } +} + +/// Returns `true` if the node uses `gz.anon_name_strategy`. +fn nodeUsesAnonNameStrategy(tree: *const Ast, node: Ast.Node.Index) bool { + const node_tags = tree.nodes.items(.tag); + switch (node_tags[node]) { + .container_decl, + .container_decl_trailing, + .container_decl_two, + .container_decl_two_trailing, + .container_decl_arg, + .container_decl_arg_trailing, + .tagged_union, + .tagged_union_trailing, + .tagged_union_two, + .tagged_union_two_trailing, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + => return true, + .builtin_call_two, .builtin_call_two_comma, .builtin_call, .builtin_call_comma => { + const builtin_token = tree.nodes.items(.main_token)[node]; + const builtin_name = tree.tokenSlice(builtin_token); + return std.mem.eql(u8, builtin_name, "@Type"); + }, + else => return false, + } +} + +/// Applies `rl` semantics to `result`. Expressions which do not do their own handling of +/// result locations must call this function on their result. +/// As an example, if the `ResultLoc` is `ptr`, it will write the result to the pointer. +/// If the `ResultLoc` is `ty`, it will coerce the result to the type. +/// Assumes nothing stacked on `gz`. +fn rvalue( + gz: *GenZir, + ri: ResultInfo, + raw_result: Zir.Inst.Ref, + src_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const result = r: { + if (refToIndex(raw_result)) |result_index| { + const zir_tags = gz.astgen.instructions.items(.tag); + const data = gz.astgen.instructions.items(.data)[result_index]; + if (zir_tags[result_index].isAlwaysVoid(data)) { + break :r Zir.Inst.Ref.void_value; + } + } + break :r raw_result; + }; + if (gz.endsWithNoReturn()) return result; + switch (ri.rl) { + .none, .coerced_ty => return result, + .discard => { + // Emit a compile error for discarding error values. + _ = try gz.addUnNode(.ensure_result_non_error, result, src_node); + return result; + }, + .ref => { + // We need a pointer but we have a value. + // Unfortunately it's not quite as simple as directly emitting a ref + // instruction here because we need subsequent address-of operator on + // const locals to return the same address. 
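+            // Reuse the `ref` recorded in `ref_table` for this instruction if
+            // one exists; otherwise create it and record it, so every later
+            // `.ref` of the same instruction resolves to the same pointer.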
+ const astgen = gz.astgen; + const tree = astgen.tree; + const src_token = tree.firstToken(src_node); + const result_index = refToIndex(result) orelse + return gz.addUnTok(.ref, result, src_token); + const zir_tags = gz.astgen.instructions.items(.tag); + if (zir_tags[result_index].isParam() or astgen.isInferred(result)) + return gz.addUnTok(.ref, result, src_token); + const gop = try astgen.ref_table.getOrPut(astgen.gpa, result_index); + if (!gop.found_existing) { + gop.value_ptr.* = try gz.makeUnTok(.ref, result, src_token); + } + return indexToRef(gop.value_ptr.*); + }, + .ty => |ty_inst| { + // Quickly eliminate some common, unnecessary type coercion. + const as_ty = @as(u64, @enumToInt(Zir.Inst.Ref.type_type)) << 32; + const as_comptime_int = @as(u64, @enumToInt(Zir.Inst.Ref.comptime_int_type)) << 32; + const as_bool = @as(u64, @enumToInt(Zir.Inst.Ref.bool_type)) << 32; + const as_usize = @as(u64, @enumToInt(Zir.Inst.Ref.usize_type)) << 32; + const as_void = @as(u64, @enumToInt(Zir.Inst.Ref.void_type)) << 32; + switch ((@as(u64, @enumToInt(ty_inst)) << 32) | @as(u64, @enumToInt(result))) { + as_ty | @enumToInt(Zir.Inst.Ref.u1_type), + as_ty | @enumToInt(Zir.Inst.Ref.u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.i8_type), + as_ty | @enumToInt(Zir.Inst.Ref.u16_type), + as_ty | @enumToInt(Zir.Inst.Ref.u29_type), + as_ty | @enumToInt(Zir.Inst.Ref.i16_type), + as_ty | @enumToInt(Zir.Inst.Ref.u32_type), + as_ty | @enumToInt(Zir.Inst.Ref.i32_type), + as_ty | @enumToInt(Zir.Inst.Ref.u64_type), + as_ty | @enumToInt(Zir.Inst.Ref.i64_type), + as_ty | @enumToInt(Zir.Inst.Ref.usize_type), + as_ty | @enumToInt(Zir.Inst.Ref.isize_type), + as_ty | @enumToInt(Zir.Inst.Ref.c_short_type), + as_ty | @enumToInt(Zir.Inst.Ref.c_ushort_type), + as_ty | @enumToInt(Zir.Inst.Ref.c_int_type), + as_ty | @enumToInt(Zir.Inst.Ref.c_uint_type), + as_ty | @enumToInt(Zir.Inst.Ref.c_long_type), + as_ty | @enumToInt(Zir.Inst.Ref.c_ulong_type), + as_ty | @enumToInt(Zir.Inst.Ref.c_longlong_type), + as_ty | @enumToInt(Zir.Inst.Ref.c_ulonglong_type), + as_ty | @enumToInt(Zir.Inst.Ref.c_longdouble_type), + as_ty | @enumToInt(Zir.Inst.Ref.f16_type), + as_ty | @enumToInt(Zir.Inst.Ref.f32_type), + as_ty | @enumToInt(Zir.Inst.Ref.f64_type), + as_ty | @enumToInt(Zir.Inst.Ref.f80_type), + as_ty | @enumToInt(Zir.Inst.Ref.f128_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyopaque_type), + as_ty | @enumToInt(Zir.Inst.Ref.bool_type), + as_ty | @enumToInt(Zir.Inst.Ref.void_type), + as_ty | @enumToInt(Zir.Inst.Ref.type_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyerror_type), + as_ty | @enumToInt(Zir.Inst.Ref.comptime_int_type), + as_ty | @enumToInt(Zir.Inst.Ref.comptime_float_type), + as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type), + as_ty | @enumToInt(Zir.Inst.Ref.null_type), + as_ty | @enumToInt(Zir.Inst.Ref.undefined_type), + as_ty | @enumToInt(Zir.Inst.Ref.fn_noreturn_no_args_type), + as_ty | @enumToInt(Zir.Inst.Ref.fn_void_no_args_type), + as_ty | @enumToInt(Zir.Inst.Ref.fn_naked_noreturn_no_args_type), + as_ty | @enumToInt(Zir.Inst.Ref.fn_ccc_void_no_args_type), + as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), + as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), + as_comptime_int | @enumToInt(Zir.Inst.Ref.zero), + as_comptime_int | @enumToInt(Zir.Inst.Ref.one), + as_bool | @enumToInt(Zir.Inst.Ref.bool_true), + as_bool | @enumToInt(Zir.Inst.Ref.bool_false), + as_usize | @enumToInt(Zir.Inst.Ref.zero_usize), + as_usize | 
@enumToInt(Zir.Inst.Ref.one_usize), + as_void | @enumToInt(Zir.Inst.Ref.void_value), + => return result, // type of result is already correct + + // Need an explicit type coercion instruction. + else => return gz.addPlNode(ri.zirTag(), src_node, Zir.Inst.As{ + .dest_type = ty_inst, + .operand = result, + }), + } + }, + .ptr => |ptr_res| { + _ = try gz.addPlNode(.store_node, ptr_res.src_node orelse src_node, Zir.Inst.Bin{ + .lhs = ptr_res.inst, + .rhs = result, + }); + return result; + }, + .inferred_ptr => |alloc| { + _ = try gz.addBin(.store_to_inferred_ptr, alloc, result); + return result; + }, + .block_ptr => |block_scope| { + block_scope.rvalue_rl_count += 1; + _ = try gz.addBin(.store_to_block_ptr, block_scope.rl_ptr, result); + return result; + }, + } +} + +/// Given an identifier token, obtain the string for it. +/// If the token uses @"" syntax, parses as a string, reports errors if applicable, +/// and allocates the result within `astgen.arena`. +/// Otherwise, returns a reference to the source code bytes directly. +/// See also `appendIdentStr` and `parseStrLit`. +fn identifierTokenString(astgen: *AstGen, token: Ast.TokenIndex) InnerError![]const u8 { + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + assert(token_tags[token] == .identifier); + const ident_name = tree.tokenSlice(token); + if (!mem.startsWith(u8, ident_name, "@")) { + return ident_name; + } + var buf: ArrayListUnmanaged(u8) = .{}; + defer buf.deinit(astgen.gpa); + try astgen.parseStrLit(token, &buf, ident_name, 1); + if (mem.indexOfScalar(u8, buf.items, 0) != null) { + return astgen.failTok(token, "identifier cannot contain null bytes", .{}); + } else if (buf.items.len == 0) { + return astgen.failTok(token, "identifier cannot be empty", .{}); + } + const duped = try astgen.arena.dupe(u8, buf.items); + return duped; +} + +/// Given an identifier token, obtain the string for it (possibly parsing as a string +/// literal if it is @"" syntax), and append the string to `buf`. +/// See also `identifierTokenString` and `parseStrLit`. +fn appendIdentStr( + astgen: *AstGen, + token: Ast.TokenIndex, + buf: *ArrayListUnmanaged(u8), +) InnerError!void { + const tree = astgen.tree; + const token_tags = tree.tokens.items(.tag); + assert(token_tags[token] == .identifier); + const ident_name = tree.tokenSlice(token); + if (!mem.startsWith(u8, ident_name, "@")) { + return buf.appendSlice(astgen.gpa, ident_name); + } else { + const start = buf.items.len; + try astgen.parseStrLit(token, buf, ident_name, 1); + const slice = buf.items[start..]; + if (mem.indexOfScalar(u8, slice, 0) != null) { + return astgen.failTok(token, "identifier cannot contain null bytes", .{}); + } else if (slice.len == 0) { + return astgen.failTok(token, "identifier cannot be empty", .{}); + } + } +} + +/// Appends the result to `buf`. 
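+/// `bytes` is the raw token text and `offset` is where the quoted literal
+/// begins within it (0 for a plain string literal token, 1 to skip the `@`
+/// of an `@"..."` identifier).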
+fn parseStrLit( + astgen: *AstGen, + token: Ast.TokenIndex, + buf: *ArrayListUnmanaged(u8), + bytes: []const u8, + offset: u32, +) InnerError!void { + const raw_string = bytes[offset..]; + var buf_managed = buf.toManaged(astgen.gpa); + const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string); + buf.* = buf_managed.moveToUnmanaged(); + switch (try result) { + .success => return, + .failure => |err| return astgen.failWithStrLitError(err, token, bytes, offset), + } +} + +fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token: Ast.TokenIndex, bytes: []const u8, offset: u32) InnerError { + const raw_string = bytes[offset..]; + switch (err) { + .invalid_escape_character => |bad_index| { + return astgen.failOff( + token, + offset + @intCast(u32, bad_index), + "invalid escape character: '{c}'", + .{raw_string[bad_index]}, + ); + }, + .expected_hex_digit => |bad_index| { + return astgen.failOff( + token, + offset + @intCast(u32, bad_index), + "expected hex digit, found '{c}'", + .{raw_string[bad_index]}, + ); + }, + .empty_unicode_escape_sequence => |bad_index| { + return astgen.failOff( + token, + offset + @intCast(u32, bad_index), + "empty unicode escape sequence", + .{}, + ); + }, + .expected_hex_digit_or_rbrace => |bad_index| { + return astgen.failOff( + token, + offset + @intCast(u32, bad_index), + "expected hex digit or '}}', found '{c}'", + .{raw_string[bad_index]}, + ); + }, + .invalid_unicode_codepoint => |bad_index| { + return astgen.failOff( + token, + offset + @intCast(u32, bad_index), + "unicode escape does not correspond to a valid codepoint", + .{}, + ); + }, + .expected_lbrace => |bad_index| { + return astgen.failOff( + token, + offset + @intCast(u32, bad_index), + "expected '{{', found '{c}", + .{raw_string[bad_index]}, + ); + }, + .expected_rbrace => |bad_index| { + return astgen.failOff( + token, + offset + @intCast(u32, bad_index), + "expected '}}', found '{c}", + .{raw_string[bad_index]}, + ); + }, + .expected_single_quote => |bad_index| { + return astgen.failOff( + token, + offset + @intCast(u32, bad_index), + "expected single quote ('), found '{c}", + .{raw_string[bad_index]}, + ); + }, + .invalid_character => |bad_index| { + return astgen.failOff( + token, + offset + @intCast(u32, bad_index), + "invalid byte in string or character literal: '{c}'", + .{raw_string[bad_index]}, + ); + }, + } +} + +fn failNode( + astgen: *AstGen, + node: Ast.Node.Index, + comptime format: []const u8, + args: anytype, +) InnerError { + return astgen.failNodeNotes(node, format, args, &[0]u32{}); +} + +fn appendErrorNode( + astgen: *AstGen, + node: Ast.Node.Index, + comptime format: []const u8, + args: anytype, +) Allocator.Error!void { + try astgen.appendErrorNodeNotes(node, format, args, &[0]u32{}); +} + +fn appendErrorNodeNotes( + astgen: *AstGen, + node: Ast.Node.Index, + comptime format: []const u8, + args: anytype, + notes: []const u32, +) Allocator.Error!void { + @setCold(true); + const string_bytes = &astgen.string_bytes; + const msg = @intCast(u32, string_bytes.items.len); + try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args); + const notes_index: u32 = if (notes.len != 0) blk: { + const notes_start = astgen.extra.items.len; + try astgen.extra.ensureTotalCapacity(astgen.gpa, notes_start + 1 + notes.len); + astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len)); + astgen.extra.appendSliceAssumeCapacity(notes); + break :blk @intCast(u32, notes_start); + } else 0; + try astgen.compile_errors.append(astgen.gpa, .{ + .msg = 
msg, + .node = node, + .token = 0, + .byte_offset = 0, + .notes = notes_index, + }); +} + +fn failNodeNotes( + astgen: *AstGen, + node: Ast.Node.Index, + comptime format: []const u8, + args: anytype, + notes: []const u32, +) InnerError { + try appendErrorNodeNotes(astgen, node, format, args, notes); + return error.AnalysisFail; +} + +fn failTok( + astgen: *AstGen, + token: Ast.TokenIndex, + comptime format: []const u8, + args: anytype, +) InnerError { + return astgen.failTokNotes(token, format, args, &[0]u32{}); +} + +fn appendErrorTok( + astgen: *AstGen, + token: Ast.TokenIndex, + comptime format: []const u8, + args: anytype, +) !void { + try astgen.appendErrorTokNotesOff(token, 0, format, args, &[0]u32{}); +} + +fn failTokNotes( + astgen: *AstGen, + token: Ast.TokenIndex, + comptime format: []const u8, + args: anytype, + notes: []const u32, +) InnerError { + try appendErrorTokNotesOff(astgen, token, 0, format, args, notes); + return error.AnalysisFail; +} + +fn appendErrorTokNotes( + astgen: *AstGen, + token: Ast.TokenIndex, + comptime format: []const u8, + args: anytype, + notes: []const u32, +) !void { + return appendErrorTokNotesOff(astgen, token, 0, format, args, notes); +} + +/// Same as `fail`, except given a token plus an offset from its starting byte +/// offset. +fn failOff( + astgen: *AstGen, + token: Ast.TokenIndex, + byte_offset: u32, + comptime format: []const u8, + args: anytype, +) InnerError { + try appendErrorTokNotesOff(astgen, token, byte_offset, format, args, &.{}); + return error.AnalysisFail; +} + +fn appendErrorTokNotesOff( + astgen: *AstGen, + token: Ast.TokenIndex, + byte_offset: u32, + comptime format: []const u8, + args: anytype, + notes: []const u32, +) !void { + @setCold(true); + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const msg = @intCast(u32, string_bytes.items.len); + try string_bytes.writer(gpa).print(format ++ "\x00", args); + const notes_index: u32 = if (notes.len != 0) blk: { + const notes_start = astgen.extra.items.len; + try astgen.extra.ensureTotalCapacity(gpa, notes_start + 1 + notes.len); + astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len)); + astgen.extra.appendSliceAssumeCapacity(notes); + break :blk @intCast(u32, notes_start); + } else 0; + try astgen.compile_errors.append(gpa, .{ + .msg = msg, + .node = 0, + .token = token, + .byte_offset = byte_offset, + .notes = notes_index, + }); +} + +fn errNoteTok( + astgen: *AstGen, + token: Ast.TokenIndex, + comptime format: []const u8, + args: anytype, +) Allocator.Error!u32 { + return errNoteTokOff(astgen, token, 0, format, args); +} + +fn errNoteTokOff( + astgen: *AstGen, + token: Ast.TokenIndex, + byte_offset: u32, + comptime format: []const u8, + args: anytype, +) Allocator.Error!u32 { + @setCold(true); + const string_bytes = &astgen.string_bytes; + const msg = @intCast(u32, string_bytes.items.len); + try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args); + return astgen.addExtra(Zir.Inst.CompileErrors.Item{ + .msg = msg, + .node = 0, + .token = token, + .byte_offset = byte_offset, + .notes = 0, + }); +} + +fn errNoteNode( + astgen: *AstGen, + node: Ast.Node.Index, + comptime format: []const u8, + args: anytype, +) Allocator.Error!u32 { + @setCold(true); + const string_bytes = &astgen.string_bytes; + const msg = @intCast(u32, string_bytes.items.len); + try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args); + return astgen.addExtra(Zir.Inst.CompileErrors.Item{ + .msg = msg, + .node = node, + .token = 0, + .byte_offset = 0, + .notes = 0, 
+ }); +} + +fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 { + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index = @intCast(u32, string_bytes.items.len); + try astgen.appendIdentStr(ident_token, string_bytes); + const key = string_bytes.items[str_index..]; + const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{ + .bytes = string_bytes, + }, StringIndexContext{ + .bytes = string_bytes, + }); + if (gop.found_existing) { + string_bytes.shrinkRetainingCapacity(str_index); + return gop.key_ptr.*; + } else { + gop.key_ptr.* = str_index; + try string_bytes.append(gpa, 0); + return str_index; + } +} + +/// Adds a doc comment block to `string_bytes` by walking backwards from `end_token`. +/// `end_token` must point at the first token after the last doc coment line. +/// Returns 0 if no doc comment is present. +fn docCommentAsString(astgen: *AstGen, end_token: Ast.TokenIndex) !u32 { + if (end_token == 0) return @as(u32, 0); + + const token_tags = astgen.tree.tokens.items(.tag); + + var tok = end_token - 1; + while (token_tags[tok] == .doc_comment) { + if (tok == 0) break; + tok -= 1; + } else { + tok += 1; + } + return docCommentAsStringFromFirst(astgen, end_token, tok); +} + +/// end_token must be > the index of the last doc comment. +fn docCommentAsStringFromFirst( + astgen: *AstGen, + end_token: Ast.TokenIndex, + start_token: Ast.TokenIndex, +) !u32 { + if (start_token == end_token) return 0; + + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index = @intCast(u32, string_bytes.items.len); + const token_starts = astgen.tree.tokens.items(.start); + const token_tags = astgen.tree.tokens.items(.tag); + + const total_bytes = token_starts[end_token] - token_starts[start_token]; + try string_bytes.ensureUnusedCapacity(gpa, total_bytes); + + var current_token = start_token; + while (current_token < end_token) : (current_token += 1) { + switch (token_tags[current_token]) { + .doc_comment => { + const tok_bytes = astgen.tree.tokenSlice(current_token)[3..]; + string_bytes.appendSliceAssumeCapacity(tok_bytes); + if (current_token != end_token - 1) { + string_bytes.appendAssumeCapacity('\n'); + } + }, + else => break, + } + } + + const key = string_bytes.items[str_index..]; + const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{ + .bytes = string_bytes, + }, StringIndexContext{ + .bytes = string_bytes, + }); + + if (gop.found_existing) { + string_bytes.shrinkRetainingCapacity(str_index); + return gop.key_ptr.*; + } else { + gop.key_ptr.* = str_index; + try string_bytes.append(gpa, 0); + return str_index; + } +} + +const IndexSlice = struct { index: u32, len: u32 }; + +fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice { + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index = @intCast(u32, string_bytes.items.len); + const token_bytes = astgen.tree.tokenSlice(str_lit_token); + try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0); + const key = string_bytes.items[str_index..]; + const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{ + .bytes = string_bytes, + }, StringIndexContext{ + .bytes = string_bytes, + }); + if (gop.found_existing) { + string_bytes.shrinkRetainingCapacity(str_index); + return IndexSlice{ + .index = gop.key_ptr.*, + .len = @intCast(u32, key.len), + }; + } else { + 
gop.key_ptr.* = str_index; + // Still need a null byte because we are using the same table + // to lookup null terminated strings, so if we get a match, it has to + // be null terminated for that to work. + try string_bytes.append(gpa, 0); + return IndexSlice{ + .index = str_index, + .len = @intCast(u32, key.len), + }; + } +} + +fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const start = node_datas[node].lhs; + const end = node_datas[node].rhs; + + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index = string_bytes.items.len; + + // First line: do not append a newline. + var tok_i = start; + { + const slice = tree.tokenSlice(tok_i); + const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1; + const line_bytes = slice[2 .. slice.len - carriage_return_ending]; + try string_bytes.appendSlice(gpa, line_bytes); + tok_i += 1; + } + // Following lines: each line prepends a newline. + while (tok_i <= end) : (tok_i += 1) { + const slice = tree.tokenSlice(tok_i); + const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1; + const line_bytes = slice[2 .. slice.len - carriage_return_ending]; + try string_bytes.ensureUnusedCapacity(gpa, line_bytes.len + 1); + string_bytes.appendAssumeCapacity('\n'); + string_bytes.appendSliceAssumeCapacity(line_bytes); + } + const len = string_bytes.items.len - str_index; + try string_bytes.append(gpa, 0); + return IndexSlice{ + .index = @intCast(u32, str_index), + .len = @intCast(u32, len), + }; +} + +fn testNameString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !u32 { + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index = @intCast(u32, string_bytes.items.len); + const token_bytes = astgen.tree.tokenSlice(str_lit_token); + try string_bytes.append(gpa, 0); // Indicates this is a test. + try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0); + const slice = string_bytes.items[str_index + 1 ..]; + if (mem.indexOfScalar(u8, slice, 0) != null) { + return astgen.failTok(str_lit_token, "test name cannot contain null bytes", .{}); + } else if (slice.len == 0) { + return astgen.failTok(str_lit_token, "empty test name must be omitted", .{}); + } + try string_bytes.append(gpa, 0); + return str_index; +} + +const Scope = struct { + tag: Tag, + + fn cast(base: *Scope, comptime T: type) ?*T { + if (T == Defer) { + switch (base.tag) { + .defer_normal, .defer_error => return @fieldParentPtr(T, "base", base), + else => return null, + } + } + if (T == Namespace) { + switch (base.tag) { + .namespace, .enum_namespace => return @fieldParentPtr(T, "base", base), + else => return null, + } + } + if (base.tag != T.base_tag) + return null; + + return @fieldParentPtr(T, "base", base); + } + + fn parent(base: *Scope) ?*Scope { + return switch (base.tag) { + .gen_zir => base.cast(GenZir).?.parent, + .local_val => base.cast(LocalVal).?.parent, + .local_ptr => base.cast(LocalPtr).?.parent, + .defer_normal, .defer_error => base.cast(Defer).?.parent, + .namespace, .enum_namespace => base.cast(Namespace).?.parent, + .top => null, + }; + } + + const Tag = enum { + gen_zir, + local_val, + local_ptr, + defer_normal, + defer_error, + namespace, + enum_namespace, + top, + }; + + /// The category of identifier. These tag names are user-visible in compile errors. 
+ const IdCat = enum { + @"function parameter", + @"local constant", + @"local variable", + @"switch tag capture", + capture, + }; + + /// This is always a `const` local and importantly the `inst` is a value type, not a pointer. + /// This structure lives as long as the AST generation of the Block + /// node that contains the variable. + const LocalVal = struct { + const base_tag: Tag = .local_val; + base: Scope = Scope{ .tag = base_tag }, + /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`. + parent: *Scope, + gen_zir: *GenZir, + inst: Zir.Inst.Ref, + /// Source location of the corresponding variable declaration. + token_src: Ast.TokenIndex, + /// Track the first identifer where it is referenced. + /// 0 means never referenced. + used: Ast.TokenIndex = 0, + /// Track the identifier where it is discarded, like this `_ = foo;`. + /// 0 means never discarded. + discarded: Ast.TokenIndex = 0, + /// String table index. + name: u32, + id_cat: IdCat, + }; + + /// This could be a `const` or `var` local. It has a pointer instead of a value. + /// This structure lives as long as the AST generation of the Block + /// node that contains the variable. + const LocalPtr = struct { + const base_tag: Tag = .local_ptr; + base: Scope = Scope{ .tag = base_tag }, + /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`. + parent: *Scope, + gen_zir: *GenZir, + ptr: Zir.Inst.Ref, + /// Source location of the corresponding variable declaration. + token_src: Ast.TokenIndex, + /// Track the first identifer where it is referenced. + /// 0 means never referenced. + used: Ast.TokenIndex = 0, + /// Track the identifier where it is discarded, like this `_ = foo;`. + /// 0 means never discarded. + discarded: Ast.TokenIndex = 0, + /// String table index. + name: u32, + id_cat: IdCat, + /// true means we find out during Sema whether the value is comptime. + /// false means it is already known at AstGen the value is runtime-known. + maybe_comptime: bool, + }; + + const Defer = struct { + base: Scope, + /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`. + parent: *Scope, + index: u32, + len: u32, + remapped_err_code: Zir.Inst.Index = 0, + }; + + /// Represents a global scope that has any number of declarations in it. + /// Each declaration has this as the parent scope. + const Namespace = struct { + const base_tag: Tag = .namespace; + base: Scope = Scope{ .tag = base_tag }, + + /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`. + parent: *Scope, + /// Maps string table index to the source location of declaration, + /// for the purposes of reporting name shadowing compile errors. + decls: std.AutoHashMapUnmanaged(u32, Ast.Node.Index) = .{}, + node: Ast.Node.Index, + inst: Zir.Inst.Index, + + /// The astgen scope containing this namespace. + /// Only valid during astgen. + declaring_gz: ?*GenZir, + + /// Map from the raw captured value to the instruction + /// ref of the capture for decls in this namespace + captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}, + + fn deinit(self: *Namespace, gpa: Allocator) void { + self.decls.deinit(gpa); + self.captures.deinit(gpa); + self.* = undefined; + } + }; + + const Top = struct { + const base_tag: Scope.Tag = .top; + base: Scope = Scope{ .tag = base_tag }, + }; +}; + +/// This is a temporary structure; references to it are valid only +/// while constructing a `Zir`. 
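+/// Each `GenZir` represents one block of ZIR under construction; a sub-block
+/// may stack on its parent's instruction list and must be unstacked before
+/// the parent appends any further instructions.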
+const GenZir = struct { + const base_tag: Scope.Tag = .gen_zir; + base: Scope = Scope{ .tag = base_tag }, + force_comptime: bool, + /// This is set to true for inline loops; false otherwise. + is_inline: bool = false, + c_import: bool = false, + /// How decls created in this scope should be named. + anon_name_strategy: Zir.Inst.NameStrategy = .anon, + /// The containing decl AST node. + decl_node_index: Ast.Node.Index, + /// The containing decl line index, absolute. + decl_line: u32, + /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`. + parent: *Scope, + /// All `GenZir` scopes for the same ZIR share this. + astgen: *AstGen, + /// Keeps track of the list of instructions in this scope. Possibly shared. + /// Indexes to instructions in `astgen`. + instructions: *ArrayListUnmanaged(Zir.Inst.Index), + /// A sub-block may share its instructions ArrayList with containing GenZir, + /// if use is strictly nested. This saves prior size of list for unstacking. + instructions_top: usize, + label: ?Label = null, + break_block: Zir.Inst.Index = 0, + continue_block: Zir.Inst.Index = 0, + /// Only valid when setBreakResultInfo is called. + break_result_info: AstGen.ResultInfo = undefined, + /// When a block has a pointer result location, here it is. + rl_ptr: Zir.Inst.Ref = .none, + /// When a block has a type result location, here it is. + rl_ty_inst: Zir.Inst.Ref = .none, + /// Keeps track of how many branches of a block did not actually + /// consume the result location. astgen uses this to figure out + /// whether to rely on break instructions or writing to the result + /// pointer for the result instruction. + rvalue_rl_count: usize = 0, + /// Keeps track of how many break instructions there are. When astgen is finished + /// with a block, it can check this against rvalue_rl_count to find out whether + /// the break instructions should be downgraded to break_void. + break_count: usize = 0, + /// Tracks `break :foo bar` instructions so they can possibly be elided later if + /// the labeled block ends up not needing a result location pointer. + labeled_breaks: ArrayListUnmanaged(struct { br: Zir.Inst.Index, search: Zir.Inst.Index }) = .{}, + + suspend_node: Ast.Node.Index = 0, + nosuspend_node: Ast.Node.Index = 0, + /// Set if this GenZir is a defer. + cur_defer_node: Ast.Node.Index = 0, + // Set if this GenZir is a defer or it is inside a defer. + any_defer_node: Ast.Node.Index = 0, + + /// Namespace members are lazy. When executing a decl within a namespace, + /// any references to external instructions need to be treated specially. + /// This list tracks those references. See also .closure_capture and .closure_get. + /// Keys are the raw instruction index, values are the closure_capture instruction. + captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}, + + const unstacked_top = std.math.maxInt(usize); + /// Call unstack before adding any new instructions to containing GenZir. 
+ fn unstack(self: *GenZir) void { + if (self.instructions_top != unstacked_top) { + self.instructions.items.len = self.instructions_top; + self.instructions_top = unstacked_top; + } + } + + fn isEmpty(self: *const GenZir) bool { + return (self.instructions_top == unstacked_top) or + (self.instructions.items.len == self.instructions_top); + } + + fn instructionsSlice(self: *const GenZir) []Zir.Inst.Index { + return if (self.instructions_top == unstacked_top) + &[0]Zir.Inst.Index{} + else + self.instructions.items[self.instructions_top..]; + } + + fn instructionsSliceUpto(self: *const GenZir, stacked_gz: *GenZir) []Zir.Inst.Index { + return if (self.instructions_top == unstacked_top) + &[0]Zir.Inst.Index{} + else if (self.instructions == stacked_gz.instructions and stacked_gz.instructions_top != unstacked_top) + self.instructions.items[self.instructions_top..stacked_gz.instructions_top] + else + self.instructions.items[self.instructions_top..]; + } + + fn makeSubBlock(gz: *GenZir, scope: *Scope) GenZir { + return .{ + .force_comptime = gz.force_comptime, + .c_import = gz.c_import, + .decl_node_index = gz.decl_node_index, + .decl_line = gz.decl_line, + .parent = scope, + .rl_ty_inst = gz.rl_ty_inst, + .astgen = gz.astgen, + .suspend_node = gz.suspend_node, + .nosuspend_node = gz.nosuspend_node, + .any_defer_node = gz.any_defer_node, + .instructions = gz.instructions, + .instructions_top = gz.instructions.items.len, + }; + } + + fn makeCoercionScope( + parent_gz: *GenZir, + scope: *Scope, + dest_type: Zir.Inst.Ref, + result_ptr: Zir.Inst.Ref, + src_node: Ast.Node.Index, + ) !GenZir { + // Detect whether this expr() call goes into rvalue() to store the result into the + // result location. If it does, elide the coerce_result_ptr instruction + // as well as the store instruction, instead passing the result as an rvalue. + var as_scope = parent_gz.makeSubBlock(scope); + errdefer as_scope.unstack(); + as_scope.rl_ptr = try as_scope.addPlNode(.coerce_result_ptr, src_node, Zir.Inst.Bin{ .lhs = dest_type, .rhs = result_ptr }); + + // `rl_ty_inst` needs to be set in case the stores to `rl_ptr` are eliminated. + as_scope.rl_ty_inst = dest_type; + + return as_scope; + } + + /// Assumes `as_scope` is stacked immediately on top of `parent_gz`. Unstacks `as_scope`. + fn finishCoercion( + as_scope: *GenZir, + parent_gz: *GenZir, + ri: ResultInfo, + src_node: Ast.Node.Index, + result: Zir.Inst.Ref, + dest_type: Zir.Inst.Ref, + ) InnerError!Zir.Inst.Ref { + assert(as_scope.instructions == parent_gz.instructions); + const astgen = as_scope.astgen; + if (as_scope.rvalue_rl_count == 1) { + // Busted! This expression didn't actually need a pointer. 
+ const zir_tags = astgen.instructions.items(.tag);
+ const zir_datas = astgen.instructions.items(.data);
+ var src: usize = as_scope.instructions_top;
+ var dst: usize = src;
+ while (src < as_scope.instructions.items.len) : (src += 1) {
+ const src_inst = as_scope.instructions.items[src];
+ if (indexToRef(src_inst) == as_scope.rl_ptr) continue;
+ if (zir_tags[src_inst] == .store_to_block_ptr) {
+ if (zir_datas[src_inst].bin.lhs == as_scope.rl_ptr) continue;
+ }
+ as_scope.instructions.items[dst] = src_inst;
+ dst += 1;
+ }
+ parent_gz.instructions.items.len -= src - dst;
+ as_scope.instructions_top = GenZir.unstacked_top;
+ // as_scope now unstacked, can add new instructions to parent_gz
+ const casted_result = try parent_gz.addBin(.as, dest_type, result);
+ return rvalue(parent_gz, ri, casted_result, src_node);
+ } else {
+ // implicitly move all as_scope instructions to parent_gz
+ as_scope.instructions_top = GenZir.unstacked_top;
+ return result;
+ }
+ }
+
+ const Label = struct {
+ token: Ast.TokenIndex,
+ block_inst: Zir.Inst.Index,
+ used: bool = false,
+ };
+
+ /// Assumes nothing stacked on `gz`.
+ fn endsWithNoReturn(gz: GenZir) bool {
+ if (gz.isEmpty()) return false;
+ const tags = gz.astgen.instructions.items(.tag);
+ const last_inst = gz.instructions.items[gz.instructions.items.len - 1];
+ return tags[last_inst].isNoReturn();
+ }
+
+ /// TODO all uses of this should be replaced with uses of `endsWithNoReturn`.
+ fn refIsNoReturn(gz: GenZir, inst_ref: Zir.Inst.Ref) bool {
+ if (inst_ref == .unreachable_value) return true;
+ if (refToIndex(inst_ref)) |inst_index| {
+ return gz.astgen.instructions.items(.tag)[inst_index].isNoReturn();
+ }
+ return false;
+ }
+
+ fn nodeIndexToRelative(gz: GenZir, node_index: Ast.Node.Index) i32 {
+ return @bitCast(i32, node_index) - @bitCast(i32, gz.decl_node_index);
+ }
+
+ fn tokenIndexToRelative(gz: GenZir, token: Ast.TokenIndex) u32 {
+ return token - gz.srcToken();
+ }
+
+ fn srcToken(gz: GenZir) Ast.TokenIndex {
+ return gz.astgen.tree.firstToken(gz.decl_node_index);
+ }
+
+ fn setBreakResultInfo(gz: *GenZir, parent_ri: AstGen.ResultInfo) void {
+ // Depending on whether the result location is a pointer or value, different
+ // ZIR needs to be generated. In the former case we rely on storing to the
+ // pointer to communicate the result, and use breakvoid; in the latter case
+ // the block break instructions will have the result values.
+ // One more complication: when the result location is a pointer, we detect
+ // the scenario where the result location is not consumed. In this case
+ // we emit ZIR for the block break instructions to have the result values,
+ // and then rvalue() on that to pass the value to the result location.
+ switch (parent_ri.rl) {
+ .coerced_ty => |ty_inst| {
+ // Type coercion needs to happen before breaks.
+ gz.rl_ty_inst = ty_inst; + gz.break_result_info = .{ .rl = .{ .ty = ty_inst } }; + }, + .ty => |ty_inst| { + gz.rl_ty_inst = ty_inst; + gz.break_result_info = parent_ri; + }, + + .none, .ref => { + gz.rl_ty_inst = .none; + gz.break_result_info = parent_ri; + }, + + .discard => { + gz.rl_ty_inst = .none; + gz.break_result_info = .{ .rl = .discard }; + }, + + .ptr => |ptr_res| { + gz.rl_ty_inst = .none; + gz.break_result_info = .{ .rl = .{ .ptr = .{ .inst = ptr_res.inst } }, .ctx = parent_ri.ctx }; + }, + + .inferred_ptr => |ptr| { + gz.rl_ty_inst = .none; + gz.rl_ptr = ptr; + gz.break_result_info = .{ .rl = .{ .block_ptr = gz }, .ctx = parent_ri.ctx }; + }, + + .block_ptr => |parent_block_scope| { + gz.rl_ty_inst = parent_block_scope.rl_ty_inst; + gz.rl_ptr = parent_block_scope.rl_ptr; + gz.break_result_info = .{ .rl = .{ .block_ptr = gz }, .ctx = parent_ri.ctx }; + }, + } + } + + /// Assumes nothing stacked on `gz`. Unstacks `gz`. + fn setBoolBrBody(gz: *GenZir, inst: Zir.Inst.Index) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const body = gz.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.Block).Struct.fields.len + body_len, + ); + const zir_datas = astgen.instructions.items(.data); + zir_datas[inst].bool_br.payload_index = astgen.addExtraAssumeCapacity( + Zir.Inst.Block{ .body_len = body_len }, + ); + astgen.appendBodyWithFixups(body); + gz.unstack(); + } + + /// Assumes nothing stacked on `gz`. Unstacks `gz`. + fn setBlockBody(gz: *GenZir, inst: Zir.Inst.Index) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const body = gz.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.Block).Struct.fields.len + body_len, + ); + const zir_datas = astgen.instructions.items(.data); + zir_datas[inst].pl_node.payload_index = astgen.addExtraAssumeCapacity( + Zir.Inst.Block{ .body_len = body_len }, + ); + astgen.appendBodyWithFixups(body); + gz.unstack(); + } + + /// Assumes nothing stacked on `gz`. Unstacks `gz`. + fn setTryBody(gz: *GenZir, inst: Zir.Inst.Index, operand: Zir.Inst.Ref) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const body = gz.instructionsSlice(); + const body_len = astgen.countBodyLenAfterFixups(body); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.Try).Struct.fields.len + body_len, + ); + const zir_datas = astgen.instructions.items(.data); + zir_datas[inst].pl_node.payload_index = astgen.addExtraAssumeCapacity( + Zir.Inst.Try{ + .operand = operand, + .body_len = body_len, + }, + ); + astgen.appendBodyWithFixups(body); + gz.unstack(); + } + + /// Must be called with the following stack set up: + /// * gz (bottom) + /// * align_gz + /// * addrspace_gz + /// * section_gz + /// * cc_gz + /// * ret_gz + /// * body_gz (top) + /// Unstacks all of those except for `gz`. 
+ fn addFunc(gz: *GenZir, args: struct { + src_node: Ast.Node.Index, + lbrace_line: u32 = 0, + lbrace_column: u32 = 0, + param_block: Zir.Inst.Index, + + align_gz: ?*GenZir, + addrspace_gz: ?*GenZir, + section_gz: ?*GenZir, + cc_gz: ?*GenZir, + ret_gz: ?*GenZir, + body_gz: ?*GenZir, + + align_ref: Zir.Inst.Ref, + addrspace_ref: Zir.Inst.Ref, + section_ref: Zir.Inst.Ref, + cc_ref: Zir.Inst.Ref, + ret_ref: Zir.Inst.Ref, + + lib_name: u32, + noalias_bits: u32, + is_var_args: bool, + is_inferred_error: bool, + is_test: bool, + is_extern: bool, + is_noinline: bool, + }) !Zir.Inst.Ref { + assert(args.src_node != 0); + const astgen = gz.astgen; + const gpa = astgen.gpa; + const ret_ref = if (args.ret_ref == .void_type) .none else args.ret_ref; + const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + + var body: []Zir.Inst.Index = &[0]Zir.Inst.Index{}; + var ret_body: []Zir.Inst.Index = &[0]Zir.Inst.Index{}; + var src_locs_buffer: [3]u32 = undefined; + var src_locs: []u32 = src_locs_buffer[0..0]; + if (args.body_gz) |body_gz| { + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const token_starts = tree.tokens.items(.start); + const fn_decl = args.src_node; + assert(node_tags[fn_decl] == .fn_decl or node_tags[fn_decl] == .test_decl); + const block = node_datas[fn_decl].rhs; + const rbrace_start = token_starts[tree.lastToken(block)]; + astgen.advanceSourceCursor(rbrace_start); + const rbrace_line = @intCast(u32, astgen.source_line - gz.decl_line); + const rbrace_column = @intCast(u32, astgen.source_column); + + const columns = args.lbrace_column | (rbrace_column << 16); + src_locs_buffer[0] = args.lbrace_line; + src_locs_buffer[1] = rbrace_line; + src_locs_buffer[2] = columns; + src_locs = &src_locs_buffer; + + body = body_gz.instructionsSlice(); + if (args.ret_gz) |ret_gz| + ret_body = ret_gz.instructionsSliceUpto(body_gz); + } else { + if (args.ret_gz) |ret_gz| + ret_body = ret_gz.instructionsSlice(); + } + const body_len = astgen.countBodyLenAfterFixups(body); + + if (args.cc_ref != .none or args.lib_name != 0 or args.is_var_args or args.is_test or + args.is_extern or args.align_ref != .none or args.section_ref != .none or + args.addrspace_ref != .none or args.noalias_bits != 0 or args.is_noinline) + { + var align_body: []Zir.Inst.Index = &.{}; + var addrspace_body: []Zir.Inst.Index = &.{}; + var section_body: []Zir.Inst.Index = &.{}; + var cc_body: []Zir.Inst.Index = &.{}; + if (args.ret_gz != null) { + align_body = args.align_gz.?.instructionsSliceUpto(args.addrspace_gz.?); + addrspace_body = args.addrspace_gz.?.instructionsSliceUpto(args.section_gz.?); + section_body = args.section_gz.?.instructionsSliceUpto(args.cc_gz.?); + cc_body = args.cc_gz.?.instructionsSliceUpto(args.ret_gz.?); + } + + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.FuncFancy).Struct.fields.len + + fancyFnExprExtraLen(astgen, align_body, args.align_ref) + + fancyFnExprExtraLen(astgen, addrspace_body, args.addrspace_ref) + + fancyFnExprExtraLen(astgen, section_body, args.section_ref) + + fancyFnExprExtraLen(astgen, cc_body, args.cc_ref) + + fancyFnExprExtraLen(astgen, ret_body, ret_ref) + + body_len + src_locs.len + + @boolToInt(args.lib_name != 0) + + @boolToInt(args.noalias_bits != 0), + ); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.FuncFancy{ + .param_block = args.param_block, + .body_len = body_len, + .bits = .{ + .is_var_args = 
args.is_var_args, + .is_inferred_error = args.is_inferred_error, + .is_test = args.is_test, + .is_extern = args.is_extern, + .is_noinline = args.is_noinline, + .has_lib_name = args.lib_name != 0, + .has_any_noalias = args.noalias_bits != 0, + + .has_align_ref = args.align_ref != .none, + .has_addrspace_ref = args.addrspace_ref != .none, + .has_section_ref = args.section_ref != .none, + .has_cc_ref = args.cc_ref != .none, + .has_ret_ty_ref = ret_ref != .none, + + .has_align_body = align_body.len != 0, + .has_addrspace_body = addrspace_body.len != 0, + .has_section_body = section_body.len != 0, + .has_cc_body = cc_body.len != 0, + .has_ret_ty_body = ret_body.len != 0, + }, + }); + if (args.lib_name != 0) { + astgen.extra.appendAssumeCapacity(args.lib_name); + } + + const zir_datas = astgen.instructions.items(.data); + if (align_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, align_body)); + astgen.appendBodyWithFixups(align_body); + const inst_data = zir_datas[align_body[align_body.len - 1]].@"break"; + astgen.extra.items[inst_data.payload_index] = new_index; + } else if (args.align_ref != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.align_ref)); + } + if (addrspace_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, addrspace_body)); + astgen.appendBodyWithFixups(addrspace_body); + const inst_data = zir_datas[addrspace_body[addrspace_body.len - 1]].@"break"; + astgen.extra.items[inst_data.payload_index] = new_index; + } else if (args.addrspace_ref != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.addrspace_ref)); + } + if (section_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, section_body)); + astgen.appendBodyWithFixups(section_body); + const inst_data = zir_datas[section_body[section_body.len - 1]].@"break"; + astgen.extra.items[inst_data.payload_index] = new_index; + } else if (args.section_ref != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.section_ref)); + } + if (cc_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, cc_body)); + astgen.appendBodyWithFixups(cc_body); + const inst_data = zir_datas[cc_body[cc_body.len - 1]].@"break"; + astgen.extra.items[inst_data.payload_index] = new_index; + } else if (args.cc_ref != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.cc_ref)); + } + if (ret_body.len != 0) { + astgen.extra.appendAssumeCapacity(countBodyLenAfterFixups(astgen, ret_body)); + astgen.appendBodyWithFixups(ret_body); + const inst_data = zir_datas[ret_body[ret_body.len - 1]].@"break"; + astgen.extra.items[inst_data.payload_index] = new_index; + } else if (ret_ref != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(ret_ref)); + } + + if (args.noalias_bits != 0) { + astgen.extra.appendAssumeCapacity(args.noalias_bits); + } + + astgen.appendBodyWithFixups(body); + astgen.extra.appendSliceAssumeCapacity(src_locs); + + // Order is important when unstacking. 
+ if (args.body_gz) |body_gz| body_gz.unstack(); + if (args.ret_gz != null) { + args.ret_gz.?.unstack(); + args.cc_gz.?.unstack(); + args.section_gz.?.unstack(); + args.addrspace_gz.?.unstack(); + args.align_gz.?.unstack(); + } + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + + astgen.instructions.appendAssumeCapacity(.{ + .tag = .func_fancy, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(args.src_node), + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return indexToRef(new_index); + } else { + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.Func).Struct.fields.len + 1 + + fancyFnExprExtraLen(astgen, ret_body, ret_ref) + + body_len + src_locs.len, + ); + + const ret_body_len = if (ret_body.len != 0) + countBodyLenAfterFixups(astgen, ret_body) + else + @boolToInt(ret_ref != .none); + + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.Func{ + .param_block = args.param_block, + .ret_body_len = ret_body_len, + .body_len = body_len, + }); + const zir_datas = astgen.instructions.items(.data); + if (ret_body.len != 0) { + astgen.appendBodyWithFixups(ret_body); + + const inst_data = zir_datas[ret_body[ret_body.len - 1]].@"break"; + astgen.extra.items[inst_data.payload_index] = new_index; + } else if (ret_ref != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(ret_ref)); + } + astgen.appendBodyWithFixups(body); + astgen.extra.appendSliceAssumeCapacity(src_locs); + + // Order is important when unstacking. + if (args.body_gz) |body_gz| body_gz.unstack(); + if (args.ret_gz) |ret_gz| ret_gz.unstack(); + if (args.cc_gz) |cc_gz| cc_gz.unstack(); + if (args.section_gz) |section_gz| section_gz.unstack(); + if (args.addrspace_gz) |addrspace_gz| addrspace_gz.unstack(); + if (args.align_gz) |align_gz| align_gz.unstack(); + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + + const tag: Zir.Inst.Tag = if (args.is_inferred_error) .func_inferred else .func; + astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(args.src_node), + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return indexToRef(new_index); + } + } + + fn fancyFnExprExtraLen(astgen: *AstGen, body: []Zir.Inst.Index, ref: Zir.Inst.Ref) u32 { + // In the case of non-empty body, there is one for the body length, + // and then one for each instruction. 
+ return countBodyLenAfterFixups(astgen, body) + @boolToInt(ref != .none); + } + + fn addVar(gz: *GenZir, args: struct { + align_inst: Zir.Inst.Ref, + lib_name: u32, + var_type: Zir.Inst.Ref, + init: Zir.Inst.Ref, + is_extern: bool, + is_threadlocal: bool, + }) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.ExtendedVar).Struct.fields.len + + @boolToInt(args.lib_name != 0) + + @boolToInt(args.align_inst != .none) + + @boolToInt(args.init != .none), + ); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.ExtendedVar{ + .var_type = args.var_type, + }); + if (args.lib_name != 0) { + astgen.extra.appendAssumeCapacity(args.lib_name); + } + if (args.align_inst != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.align_inst)); + } + if (args.init != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.init)); + } + + const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .variable, + .small = @bitCast(u16, Zir.Inst.ExtendedVar.Small{ + .has_lib_name = args.lib_name != 0, + .has_align = args.align_inst != .none, + .has_init = args.init != .none, + .is_extern = args.is_extern, + .is_threadlocal = args.is_threadlocal, + }), + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return indexToRef(new_index); + } + + /// Note that this returns a `Zir.Inst.Index` not a ref. + /// Leaves the `payload_index` field undefined. + fn addBoolBr( + gz: *GenZir, + tag: Zir.Inst.Tag, + lhs: Zir.Inst.Ref, + ) !Zir.Inst.Index { + assert(lhs != .none); + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .bool_br = .{ + .lhs = lhs, + .payload_index = undefined, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn addInt(gz: *GenZir, integer: u64) !Zir.Inst.Ref { + return gz.add(.{ + .tag = .int, + .data = .{ .int = integer }, + }); + } + + fn addIntBig(gz: *GenZir, limbs: []const std.math.big.Limb) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.string_bytes.ensureUnusedCapacity(gpa, @sizeOf(std.math.big.Limb) * limbs.len); + + const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .int_big, + .data = .{ .str = .{ + .start = @intCast(u32, astgen.string_bytes.items.len), + .len = @intCast(u32, limbs.len), + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + astgen.string_bytes.appendSliceAssumeCapacity(mem.sliceAsBytes(limbs)); + return indexToRef(new_index); + } + + fn addFloat(gz: *GenZir, number: f64) !Zir.Inst.Ref { + return gz.add(.{ + .tag = .float, + .data = .{ .float = number }, + }); + } + + fn addUnNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + operand: Zir.Inst.Ref, + /// Absolute node index. This function does the conversion to offset from Decl. 
+ src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + assert(operand != .none); + return gz.add(.{ + .tag = tag, + .data = .{ .un_node = .{ + .operand = operand, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }); + } + + fn makeUnNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + operand: Zir.Inst.Ref, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + ) !Zir.Inst.Index { + assert(operand != .none); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + try gz.astgen.instructions.append(gz.astgen.gpa, .{ + .tag = tag, + .data = .{ .un_node = .{ + .operand = operand, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }); + return new_index; + } + + fn addPlNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + extra: anytype, + ) !Zir.Inst.Ref { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const payload_index = try gz.astgen.addExtra(extra); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(src_node), + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return indexToRef(new_index); + } + + fn addPlNodePayloadIndex( + gz: *GenZir, + tag: Zir.Inst.Tag, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + payload_index: u32, + ) !Zir.Inst.Ref { + return try gz.add(.{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(src_node), + .payload_index = payload_index, + } }, + }); + } + + /// Supports `param_gz` stacked on `gz`. Assumes nothing stacked on `param_gz`. Unstacks `param_gz`. + fn addParam( + gz: *GenZir, + param_gz: *GenZir, + tag: Zir.Inst.Tag, + /// Absolute token index. This function does the conversion to Decl offset. 
+ abs_tok_index: Ast.TokenIndex, + name: u32, + first_doc_comment: ?Ast.TokenIndex, + ) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + const param_body = param_gz.instructionsSlice(); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Param).Struct.fields.len + + param_body.len); + + const doc_comment_index = if (first_doc_comment) |first| + try gz.astgen.docCommentAsStringFromFirst(abs_tok_index, first) + else + 0; + + const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Param{ + .name = name, + .doc_comment = doc_comment_index, + .body_len = @intCast(u32, param_body.len), + }); + gz.astgen.extra.appendSliceAssumeCapacity(param_body); + param_gz.unstack(); + + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .pl_tok = .{ + .src_tok = gz.tokenIndexToRelative(abs_tok_index), + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn addExtendedPayload(gz: *GenZir, opcode: Zir.Inst.Extended, extra: anytype) !Zir.Inst.Ref { + return addExtendedPayloadSmall(gz, opcode, undefined, extra); + } + + fn addExtendedPayloadSmall( + gz: *GenZir, + opcode: Zir.Inst.Extended, + small: u16, + extra: anytype, + ) !Zir.Inst.Ref { + const gpa = gz.astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const payload_index = try gz.astgen.addExtra(extra); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = opcode, + .small = small, + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return indexToRef(new_index); + } + + fn addExtendedMultiOp( + gz: *GenZir, + opcode: Zir.Inst.Extended, + node: Ast.Node.Index, + operands: []const Zir.Inst.Ref, + ) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.NodeMultiOp).Struct.fields.len + operands.len, + ); + + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.NodeMultiOp{ + .src_node = gz.nodeIndexToRelative(node), + }); + const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = opcode, + .small = @intCast(u16, operands.len), + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + astgen.appendRefsAssumeCapacity(operands); + return indexToRef(new_index); + } + + fn addExtendedMultiOpPayloadIndex( + gz: *GenZir, + opcode: Zir.Inst.Extended, + payload_index: u32, + trailing_len: usize, + ) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = opcode, + .small = @intCast(u16, trailing_len), + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return indexToRef(new_index); + } + + 
fn addUnTok( + gz: *GenZir, + tag: Zir.Inst.Tag, + operand: Zir.Inst.Ref, + /// Absolute token index. This function does the conversion to Decl offset. + abs_tok_index: Ast.TokenIndex, + ) !Zir.Inst.Ref { + assert(operand != .none); + return gz.add(.{ + .tag = tag, + .data = .{ .un_tok = .{ + .operand = operand, + .src_tok = gz.tokenIndexToRelative(abs_tok_index), + } }, + }); + } + + fn makeUnTok( + gz: *GenZir, + tag: Zir.Inst.Tag, + operand: Zir.Inst.Ref, + /// Absolute token index. This function does the conversion to Decl offset. + abs_tok_index: Ast.TokenIndex, + ) !Zir.Inst.Index { + const astgen = gz.astgen; + const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + assert(operand != .none); + try astgen.instructions.append(astgen.gpa, .{ + .tag = tag, + .data = .{ .un_tok = .{ + .operand = operand, + .src_tok = gz.tokenIndexToRelative(abs_tok_index), + } }, + }); + return new_index; + } + + fn addStrTok( + gz: *GenZir, + tag: Zir.Inst.Tag, + str_index: u32, + /// Absolute token index. This function does the conversion to Decl offset. + abs_tok_index: Ast.TokenIndex, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = tag, + .data = .{ .str_tok = .{ + .start = str_index, + .src_tok = gz.tokenIndexToRelative(abs_tok_index), + } }, + }); + } + + fn addSaveErrRetIndex( + gz: *GenZir, + cond: union(enum) { + always: void, + if_of_error_type: Zir.Inst.Ref, + }, + ) !Zir.Inst.Index { + return gz.addAsIndex(.{ + .tag = .save_err_ret_index, + .data = .{ .save_err_ret_index = .{ + .operand = if (cond == .if_of_error_type) cond.if_of_error_type else .none, + } }, + }); + } + + const BranchTarget = union(enum) { + ret, + block: Zir.Inst.Index, + }; + + fn addRestoreErrRetIndex( + gz: *GenZir, + bt: BranchTarget, + cond: union(enum) { + always: void, + if_non_error: Zir.Inst.Ref, + }, + ) !Zir.Inst.Index { + return gz.addAsIndex(.{ + .tag = .restore_err_ret_index, + .data = .{ .restore_err_ret_index = .{ + .block = switch (bt) { + .ret => .none, + .block => |b| Zir.indexToRef(b), + }, + .operand = if (cond == .if_non_error) cond.if_non_error else .none, + } }, + }); + } + + fn addBreak( + gz: *GenZir, + tag: Zir.Inst.Tag, + block_inst: Zir.Inst.Index, + operand: Zir.Inst.Ref, + ) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const extra: Zir.Inst.Break = .{ + .block_inst = block_inst, + .operand_src_node = Zir.Inst.Break.no_src_node, + }; + const payload_index = try gz.astgen.addExtra(extra); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .@"break" = .{ + .operand = operand, + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn makeBreak( + gz: *GenZir, + tag: Zir.Inst.Tag, + block_inst: Zir.Inst.Index, + operand: Zir.Inst.Ref, + ) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const extra: Zir.Inst.Break = .{ + .block_inst = block_inst, + .operand_src_node = Zir.Inst.Break.no_src_node, + }; + const payload_index = try gz.astgen.addExtra(extra); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .@"break" = .{ + .operand = operand, + .payload_index = payload_index, + } }, + }); + return new_index; + } + + fn addBreakWithSrcNode( + gz: 
*GenZir, + tag: Zir.Inst.Tag, + block_inst: Zir.Inst.Index, + operand: Zir.Inst.Ref, + operand_src_node: Ast.Node.Index, + ) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const extra: Zir.Inst.Break = .{ + .block_inst = block_inst, + .operand_src_node = gz.nodeIndexToRelative(operand_src_node), + }; + const payload_index = try gz.astgen.addExtra(extra); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .@"break" = .{ + .operand = operand, + .payload_index = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn makeBreakWithSrcNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + block_inst: Zir.Inst.Index, + operand: Zir.Inst.Ref, + operand_src_node: Ast.Node.Index, + ) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const extra: Zir.Inst.Break = .{ + .block_inst = block_inst, + .operand_src_node = gz.nodeIndexToRelative(operand_src_node), + }; + const payload_index = try gz.astgen.addExtra(extra); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .@"break" = .{ + .operand = operand, + .payload_index = payload_index, + } }, + }); + return new_index; + } + + fn addBin( + gz: *GenZir, + tag: Zir.Inst.Tag, + lhs: Zir.Inst.Ref, + rhs: Zir.Inst.Ref, + ) !Zir.Inst.Ref { + assert(lhs != .none); + assert(rhs != .none); + return gz.add(.{ + .tag = tag, + .data = .{ .bin = .{ + .lhs = lhs, + .rhs = rhs, + } }, + }); + } + + fn addDefer(gz: *GenZir, index: u32, len: u32) !void { + _ = try gz.add(.{ + .tag = .@"defer", + .data = .{ .@"defer" = .{ + .index = index, + .len = len, + } }, + }); + } + + fn addDecl( + gz: *GenZir, + tag: Zir.Inst.Tag, + decl_index: u32, + src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(src_node), + .payload_index = decl_index, + } }, + }); + } + + fn addNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = tag, + .data = .{ .node = gz.nodeIndexToRelative(src_node) }, + }); + } + + fn addInstNode( + gz: *GenZir, + tag: Zir.Inst.Tag, + inst: Zir.Inst.Index, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = tag, + .data = .{ .inst_node = .{ + .inst = inst, + .src_node = gz.nodeIndexToRelative(src_node), + } }, + }); + } + + fn addNodeExtended( + gz: *GenZir, + opcode: Zir.Inst.Extended, + /// Absolute node index. This function does the conversion to offset from Decl. + src_node: Ast.Node.Index, + ) !Zir.Inst.Ref { + return gz.add(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = opcode, + .small = undefined, + .operand = @bitCast(u32, gz.nodeIndexToRelative(src_node)), + } }, + }); + } + + fn addAllocExtended( + gz: *GenZir, + args: struct { + /// Absolute node index. This function does the conversion to offset from Decl. 
+ node: Ast.Node.Index, + type_inst: Zir.Inst.Ref, + align_inst: Zir.Inst.Ref, + is_const: bool, + is_comptime: bool, + }, + ) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Zir.Inst.AllocExtended).Struct.fields.len + + @as(usize, @boolToInt(args.type_inst != .none)) + + @as(usize, @boolToInt(args.align_inst != .none)), + ); + const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.AllocExtended{ + .src_node = gz.nodeIndexToRelative(args.node), + }); + if (args.type_inst != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.type_inst)); + } + if (args.align_inst != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.align_inst)); + } + + const has_type: u4 = @boolToInt(args.type_inst != .none); + const has_align: u4 = @boolToInt(args.align_inst != .none); + const is_const: u4 = @boolToInt(args.is_const); + const is_comptime: u4 = @boolToInt(args.is_comptime); + const small: u16 = has_type | (has_align << 1) | (is_const << 2) | (is_comptime << 3); + + const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .alloc, + .small = small, + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return indexToRef(new_index); + } + + fn addAsm( + gz: *GenZir, + args: struct { + tag: Zir.Inst.Extended, + /// Absolute node index. This function does the conversion to offset from Decl. + node: Ast.Node.Index, + asm_source: u32, + output_type_bits: u32, + is_volatile: bool, + outputs: []const Zir.Inst.Asm.Output, + inputs: []const Zir.Inst.Asm.Input, + clobbers: []const u32, + }, + ) !Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.instructions.ensureUnusedCapacity(gpa, 1); + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Asm).Struct.fields.len + + args.outputs.len * @typeInfo(Zir.Inst.Asm.Output).Struct.fields.len + + args.inputs.len * @typeInfo(Zir.Inst.Asm.Input).Struct.fields.len + + args.clobbers.len); + + const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Asm{ + .src_node = gz.nodeIndexToRelative(args.node), + .asm_source = args.asm_source, + .output_type_bits = args.output_type_bits, + }); + for (args.outputs) |output| { + _ = gz.astgen.addExtraAssumeCapacity(output); + } + for (args.inputs) |input| { + _ = gz.astgen.addExtraAssumeCapacity(input); + } + gz.astgen.extra.appendSliceAssumeCapacity(args.clobbers); + + // * 0b00000000_000XXXXX - `outputs_len`. + // * 0b000000XX_XXX00000 - `inputs_len`. + // * 0b0XXXXX00_00000000 - `clobbers_len`. + // * 0bX0000000_00000000 - is volatile + const small: u16 = @intCast(u16, args.outputs.len) | + @intCast(u16, args.inputs.len << 5) | + @intCast(u16, args.clobbers.len << 10) | + (@as(u16, @boolToInt(args.is_volatile)) << 15); + + const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len); + astgen.instructions.appendAssumeCapacity(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = args.tag, + .small = small, + .operand = payload_index, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return indexToRef(new_index); + } + + /// Note that this returns a `Zir.Inst.Index` not a ref. + /// Does *not* append the block instruction to the scope. 
+ /// Leaves the `payload_index` field undefined. + fn makeBlockInst(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + const gpa = gz.astgen.gpa; + try gz.astgen.instructions.append(gpa, .{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(node), + .payload_index = undefined, + } }, + }); + return new_index; + } + + /// Note that this returns a `Zir.Inst.Index` not a ref. + /// Leaves the `payload_index` field undefined. + fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + try gz.astgen.instructions.append(gpa, .{ + .tag = tag, + .data = .{ .pl_node = .{ + .src_node = gz.nodeIndexToRelative(node), + .payload_index = undefined, + } }, + }); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn setStruct(gz: *GenZir, inst: Zir.Inst.Index, args: struct { + src_node: Ast.Node.Index, + fields_len: u32, + decls_len: u32, + backing_int_ref: Zir.Inst.Ref, + backing_int_body_len: u32, + layout: std.builtin.Type.ContainerLayout, + known_non_opv: bool, + known_comptime_only: bool, + is_tuple: bool, + }) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try astgen.extra.ensureUnusedCapacity(gpa, 6); + const payload_index = @intCast(u32, astgen.extra.items.len); + + if (args.src_node != 0) { + const node_offset = gz.nodeIndexToRelative(args.src_node); + astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + } + if (args.fields_len != 0) { + astgen.extra.appendAssumeCapacity(args.fields_len); + } + if (args.decls_len != 0) { + astgen.extra.appendAssumeCapacity(args.decls_len); + } + if (args.backing_int_ref != .none) { + astgen.extra.appendAssumeCapacity(args.backing_int_body_len); + if (args.backing_int_body_len == 0) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.backing_int_ref)); + } + } + astgen.instructions.set(inst, .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .struct_decl, + .small = @bitCast(u16, Zir.Inst.StructDecl.Small{ + .has_src_node = args.src_node != 0, + .has_fields_len = args.fields_len != 0, + .has_decls_len = args.decls_len != 0, + .has_backing_int = args.backing_int_ref != .none, + .known_non_opv = args.known_non_opv, + .known_comptime_only = args.known_comptime_only, + .is_tuple = args.is_tuple, + .name_strategy = gz.anon_name_strategy, + .layout = args.layout, + }), + .operand = payload_index, + } }, + }); + } + + fn setUnion(gz: *GenZir, inst: Zir.Inst.Index, args: struct { + src_node: Ast.Node.Index, + tag_type: Zir.Inst.Ref, + body_len: u32, + fields_len: u32, + decls_len: u32, + layout: std.builtin.Type.ContainerLayout, + auto_enum_tag: bool, + }) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try astgen.extra.ensureUnusedCapacity(gpa, 5); + const payload_index = @intCast(u32, astgen.extra.items.len); + + if (args.src_node != 0) { + const node_offset = gz.nodeIndexToRelative(args.src_node); + astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + } + if (args.tag_type != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.tag_type)); + } + if (args.body_len != 0) { + astgen.extra.appendAssumeCapacity(args.body_len); + } + if (args.fields_len != 0) { + astgen.extra.appendAssumeCapacity(args.fields_len); + } + if (args.decls_len != 0) { + 
astgen.extra.appendAssumeCapacity(args.decls_len); + } + astgen.instructions.set(inst, .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .union_decl, + .small = @bitCast(u16, Zir.Inst.UnionDecl.Small{ + .has_src_node = args.src_node != 0, + .has_tag_type = args.tag_type != .none, + .has_body_len = args.body_len != 0, + .has_fields_len = args.fields_len != 0, + .has_decls_len = args.decls_len != 0, + .name_strategy = gz.anon_name_strategy, + .layout = args.layout, + .auto_enum_tag = args.auto_enum_tag, + }), + .operand = payload_index, + } }, + }); + } + + fn setEnum(gz: *GenZir, inst: Zir.Inst.Index, args: struct { + src_node: Ast.Node.Index, + tag_type: Zir.Inst.Ref, + body_len: u32, + fields_len: u32, + decls_len: u32, + nonexhaustive: bool, + }) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try astgen.extra.ensureUnusedCapacity(gpa, 5); + const payload_index = @intCast(u32, astgen.extra.items.len); + + if (args.src_node != 0) { + const node_offset = gz.nodeIndexToRelative(args.src_node); + astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + } + if (args.tag_type != .none) { + astgen.extra.appendAssumeCapacity(@enumToInt(args.tag_type)); + } + if (args.body_len != 0) { + astgen.extra.appendAssumeCapacity(args.body_len); + } + if (args.fields_len != 0) { + astgen.extra.appendAssumeCapacity(args.fields_len); + } + if (args.decls_len != 0) { + astgen.extra.appendAssumeCapacity(args.decls_len); + } + astgen.instructions.set(inst, .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .enum_decl, + .small = @bitCast(u16, Zir.Inst.EnumDecl.Small{ + .has_src_node = args.src_node != 0, + .has_tag_type = args.tag_type != .none, + .has_body_len = args.body_len != 0, + .has_fields_len = args.fields_len != 0, + .has_decls_len = args.decls_len != 0, + .name_strategy = gz.anon_name_strategy, + .nonexhaustive = args.nonexhaustive, + }), + .operand = payload_index, + } }, + }); + } + + fn setOpaque(gz: *GenZir, inst: Zir.Inst.Index, args: struct { + src_node: Ast.Node.Index, + decls_len: u32, + }) !void { + const astgen = gz.astgen; + const gpa = astgen.gpa; + + try astgen.extra.ensureUnusedCapacity(gpa, 2); + const payload_index = @intCast(u32, astgen.extra.items.len); + + if (args.src_node != 0) { + const node_offset = gz.nodeIndexToRelative(args.src_node); + astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset)); + } + if (args.decls_len != 0) { + astgen.extra.appendAssumeCapacity(args.decls_len); + } + astgen.instructions.set(inst, .{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .opaque_decl, + .small = @bitCast(u16, Zir.Inst.OpaqueDecl.Small{ + .has_src_node = args.src_node != 0, + .has_decls_len = args.decls_len != 0, + .name_strategy = gz.anon_name_strategy, + }), + .operand = payload_index, + } }, + }); + } + + fn add(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Ref { + return indexToRef(try gz.addAsIndex(inst)); + } + + fn addAsIndex(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.appendAssumeCapacity(inst); + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn reserveInstructionIndex(gz: *GenZir) !Zir.Inst.Index { + const gpa = gz.astgen.gpa; + try gz.instructions.ensureUnusedCapacity(gpa, 1); + try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); + + const 
new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + gz.astgen.instructions.len += 1; + gz.instructions.appendAssumeCapacity(new_index); + return new_index; + } + + fn addRet(gz: *GenZir, ri: ResultInfo, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void { + switch (ri.rl) { + .ptr => |ptr_res| _ = try gz.addUnNode(.ret_load, ptr_res.inst, node), + .ty => _ = try gz.addUnNode(.ret_node, operand, node), + else => unreachable, + } + } + + fn addNamespaceCaptures(gz: *GenZir, namespace: *Scope.Namespace) !void { + if (namespace.captures.count() > 0) { + try gz.instructions.ensureUnusedCapacity(gz.astgen.gpa, namespace.captures.count()); + for (namespace.captures.values()) |capture| { + gz.instructions.appendAssumeCapacity(capture); + } + } + } + + fn addDbgVar(gz: *GenZir, tag: Zir.Inst.Tag, name: u32, inst: Zir.Inst.Ref) !void { + if (gz.force_comptime) return; + + _ = try gz.add(.{ .tag = tag, .data = .{ + .str_op = .{ + .str = name, + .operand = inst, + }, + } }); + } + + fn addDbgBlockBegin(gz: *GenZir) !void { + if (gz.force_comptime) return; + + _ = try gz.add(.{ .tag = .dbg_block_begin, .data = undefined }); + } + + fn addDbgBlockEnd(gz: *GenZir) !void { + if (gz.force_comptime) return; + const gpa = gz.astgen.gpa; + + const tags = gz.astgen.instructions.items(.tag); + const last_inst = gz.instructions.items[gz.instructions.items.len - 1]; + // remove dbg_block_begin immediately followed by dbg_block_end + if (tags[last_inst] == .dbg_block_begin) { + _ = gz.instructions.pop(); + return; + } + + const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); + try gz.astgen.instructions.append(gpa, .{ .tag = .dbg_block_end, .data = undefined }); + try gz.instructions.append(gpa, new_index); + } +}; + +/// This can only be for short-lived references; the memory becomes invalidated +/// when another string is added. +fn nullTerminatedString(astgen: AstGen, index: usize) [*:0]const u8 { + return @ptrCast([*:0]const u8, astgen.string_bytes.items.ptr) + index; +} + +/// Local variables shadowing detection, including function parameters. 
+fn detectLocalShadowing( + astgen: *AstGen, + scope: *Scope, + ident_name: u32, + name_token: Ast.TokenIndex, + token_bytes: []const u8, + id_cat: Scope.IdCat, +) !void { + const gpa = astgen.gpa; + if (token_bytes[0] != '@' and isPrimitive(token_bytes)) { + return astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{ + token_bytes, + }, &[_]u32{ + try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{ + token_bytes, + }), + }); + } + + var s = scope; + var outer_scope = false; + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + if (local_val.name == ident_name) { + const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); + const name = try gpa.dupe(u8, name_slice); + defer gpa.free(name); + if (outer_scope) { + return astgen.failTokNotes(name_token, "{s} '{s}' shadows {s} from outer scope", .{ + @tagName(id_cat), name, @tagName(local_val.id_cat), + }, &[_]u32{ + try astgen.errNoteTok( + local_val.token_src, + "previous declaration here", + .{}, + ), + }); + } + return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{ + @tagName(local_val.id_cat), name, + }, &[_]u32{ + try astgen.errNoteTok( + local_val.token_src, + "previous declaration here", + .{}, + ), + }); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == ident_name) { + const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); + const name = try gpa.dupe(u8, name_slice); + defer gpa.free(name); + if (outer_scope) { + return astgen.failTokNotes(name_token, "{s} '{s}' shadows {s} from outer scope", .{ + @tagName(id_cat), name, @tagName(local_ptr.id_cat), + }, &[_]u32{ + try astgen.errNoteTok( + local_ptr.token_src, + "previous declaration here", + .{}, + ), + }); + } + return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{ + @tagName(local_ptr.id_cat), name, + }, &[_]u32{ + try astgen.errNoteTok( + local_ptr.token_src, + "previous declaration here", + .{}, + ), + }); + } + s = local_ptr.parent; + }, + .namespace, .enum_namespace => { + outer_scope = true; + const ns = s.cast(Scope.Namespace).?; + const decl_node = ns.decls.get(ident_name) orelse { + s = ns.parent; + continue; + }; + const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); + const name = try gpa.dupe(u8, name_slice); + defer gpa.free(name); + return astgen.failTokNotes(name_token, "{s} shadows declaration of '{s}'", .{ + @tagName(id_cat), name, + }, &[_]u32{ + try astgen.errNoteNode(decl_node, "declared here", .{}), + }); + }, + .gen_zir => { + s = s.cast(GenZir).?.parent; + outer_scope = true; + }, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .top => break, + }; +} + +/// Advances the source cursor to the main token of `node` if not in comptime scope. +/// Usually paired with `emitDbgStmt`. +fn maybeAdvanceSourceCursorToMainToken(gz: *GenZir, node: Ast.Node.Index) void { + if (gz.force_comptime) return; + + const tree = gz.astgen.tree; + const token_starts = tree.tokens.items(.start); + const main_tokens = tree.nodes.items(.main_token); + const node_start = token_starts[main_tokens[node]]; + gz.astgen.advanceSourceCursor(node_start); +} + +/// Advances the source cursor to the beginning of `node`. 
+fn advanceSourceCursorToNode(astgen: *AstGen, node: Ast.Node.Index) void { + const tree = astgen.tree; + const token_starts = tree.tokens.items(.start); + const node_start = token_starts[tree.firstToken(node)]; + astgen.advanceSourceCursor(node_start); +} + +/// Advances the source cursor to an absolute byte offset `end` in the file. +fn advanceSourceCursor(astgen: *AstGen, end: usize) void { + const source = astgen.tree.source; + var i = astgen.source_offset; + var line = astgen.source_line; + var column = astgen.source_column; + assert(i <= end); + while (i < end) : (i += 1) { + if (source[i] == '\n') { + line += 1; + column = 0; + } else { + column += 1; + } + } + astgen.source_offset = i; + astgen.source_line = line; + astgen.source_column = column; +} + +fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.Node.Index) !u32 { + const gpa = astgen.gpa; + const tree = astgen.tree; + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const token_tags = tree.tokens.items(.tag); + var decl_count: u32 = 0; + for (members) |member_node| { + const name_token = switch (node_tags[member_node]) { + .fn_proto_simple, + .fn_proto_multi, + .fn_proto_one, + .fn_proto, + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => blk: { + decl_count += 1; + break :blk main_tokens[member_node] + 1; + }, + + .fn_decl => blk: { + decl_count += 1; + const ident = main_tokens[member_node] + 1; + if (token_tags[ident] != .identifier) { + switch (astgen.failNode(member_node, "missing function name", .{})) { + error.AnalysisFail => continue, + error.OutOfMemory => return error.OutOfMemory, + } + } + break :blk ident; + }, + + .@"comptime", .@"usingnamespace", .test_decl => { + decl_count += 1; + continue; + }, + + else => continue, + }; + + const token_bytes = astgen.tree.tokenSlice(name_token); + if (token_bytes[0] != '@' and isPrimitive(token_bytes)) { + switch (astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{ + token_bytes, + }, &[_]u32{ + try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{ + token_bytes, + }), + })) { + error.AnalysisFail => continue, + error.OutOfMemory => return error.OutOfMemory, + } + } + + const name_str_index = try astgen.identAsString(name_token); + const gop = try namespace.decls.getOrPut(gpa, name_str_index); + if (gop.found_existing) { + const name = try gpa.dupe(u8, mem.span(astgen.nullTerminatedString(name_str_index))); + defer gpa.free(name); + switch (astgen.failNodeNotes(member_node, "redeclaration of '{s}'", .{ + name, + }, &[_]u32{ + try astgen.errNoteNode(gop.value_ptr.*, "other declaration here", .{}), + })) { + error.AnalysisFail => continue, + error.OutOfMemory => return error.OutOfMemory, + } + } + + var s = namespace.parent; + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + if (local_val.name == name_str_index) { + return astgen.failTokNotes(name_token, "declaration '{s}' shadows {s} from outer scope", .{ + token_bytes, @tagName(local_val.id_cat), + }, &[_]u32{ + try astgen.errNoteTok( + local_val.token_src, + "previous declaration here", + .{}, + ), + }); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == name_str_index) { + return astgen.failTokNotes(name_token, "declaration '{s}' shadows {s} from outer scope", .{ + token_bytes, @tagName(local_ptr.id_cat), + }, &[_]u32{ + try astgen.errNoteTok( + 
local_ptr.token_src, + "previous declaration here", + .{}, + ), + }); + } + s = local_ptr.parent; + }, + .namespace, .enum_namespace => s = s.cast(Scope.Namespace).?.parent, + .gen_zir => s = s.cast(GenZir).?.parent, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .top => break, + }; + gop.value_ptr.* = member_node; + } + return decl_count; +} + +fn isInferred(astgen: *AstGen, ref: Zir.Inst.Ref) bool { + const inst = refToIndex(ref) orelse return false; + const zir_tags = astgen.instructions.items(.tag); + return switch (zir_tags[inst]) { + .alloc_inferred, + .alloc_inferred_mut, + .alloc_inferred_comptime, + .alloc_inferred_comptime_mut, + => true, + + .extended => { + const zir_data = astgen.instructions.items(.data); + if (zir_data[inst].extended.opcode != .alloc) return false; + const small = @bitCast(Zir.Inst.AllocExtended.Small, zir_data[inst].extended.small); + return !small.has_type; + }, + + else => false, + }; +} + +/// Assumes capacity for body has already been added. Needed capacity taking into +/// account fixups can be found with `countBodyLenAfterFixups`. +fn appendBodyWithFixups(astgen: *AstGen, body: []const Zir.Inst.Index) void { + return appendBodyWithFixupsArrayList(astgen, &astgen.extra, body); +} + +fn appendBodyWithFixupsArrayList( + astgen: *AstGen, + list: *std.ArrayListUnmanaged(u32), + body: []const Zir.Inst.Index, +) void { + for (body) |body_inst| { + appendPossiblyRefdBodyInst(astgen, list, body_inst); + } +} + +fn appendPossiblyRefdBodyInst( + astgen: *AstGen, + list: *std.ArrayListUnmanaged(u32), + body_inst: Zir.Inst.Index, +) void { + list.appendAssumeCapacity(body_inst); + const kv = astgen.ref_table.fetchRemove(body_inst) orelse return; + const ref_inst = kv.value; + return appendPossiblyRefdBodyInst(astgen, list, ref_inst); +} + +fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 { + var count = body.len; + for (body) |body_inst| { + var check_inst = body_inst; + while (astgen.ref_table.get(check_inst)) |ref_inst| { + count += 1; + check_inst = ref_inst; + } + } + return @intCast(u32, count); +} + +fn emitDbgStmt(gz: *GenZir, line: u32, column: u32) !void { + if (gz.force_comptime) return; + + _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{ + .dbg_stmt = .{ + .line = line, + .column = column, + }, + } }); +} + +fn lowerAstErrors(astgen: *AstGen) !void { + const tree = astgen.tree; + assert(tree.errors.len > 0); + + const gpa = astgen.gpa; + const parse_err = tree.errors[0]; + + var msg: std.ArrayListUnmanaged(u8) = .{}; + defer msg.deinit(gpa); + + const token_starts = tree.tokens.items(.start); + const token_tags = tree.tokens.items(.tag); + + var notes: std.ArrayListUnmanaged(u32) = .{}; + defer notes.deinit(gpa); + + if (token_tags[parse_err.token + @boolToInt(parse_err.token_is_prev)] == .invalid) { + const tok = parse_err.token + @boolToInt(parse_err.token_is_prev); + const bad_off = @intCast(u32, tree.tokenSlice(parse_err.token + @boolToInt(parse_err.token_is_prev)).len); + const byte_abs = token_starts[parse_err.token + @boolToInt(parse_err.token_is_prev)] + bad_off; + try notes.append(gpa, try astgen.errNoteTokOff(tok, bad_off, "invalid byte: '{'}'", .{ + std.zig.fmtEscapes(tree.source[byte_abs..][0..1]), + })); + } + + for (tree.errors[1..]) |note| { + if (!note.is_note) break; + + msg.clearRetainingCapacity(); + try tree.renderError(note, msg.writer(gpa)); + try notes.append(gpa, try astgen.errNoteTok(note.token, "{s}", .{msg.items})); + } + + const extra_offset = tree.errorOffset(parse_err); + 
msg.clearRetainingCapacity(); + try tree.renderError(parse_err, msg.writer(gpa)); + try astgen.appendErrorTokNotesOff(parse_err.token, extra_offset, "{s}", .{msg.items}, notes.items); +} diff --git a/src/stage2/BuiltinFn.zig b/src/stage2/BuiltinFn.zig new file mode 100644 index 0000000..4a98a5a --- /dev/null +++ b/src/stage2/BuiltinFn.zig @@ -0,0 +1,1007 @@ +const std = @import("std"); + +pub const Tag = enum { + add_with_overflow, + addrspace_cast, + align_cast, + align_of, + as, + async_call, + atomic_load, + atomic_rmw, + atomic_store, + bit_cast, + bit_offset_of, + bool_to_int, + bit_size_of, + breakpoint, + mul_add, + byte_swap, + bit_reverse, + offset_of, + call, + c_define, + c_import, + c_include, + clz, + cmpxchg_strong, + cmpxchg_weak, + compile_error, + compile_log, + const_cast, + ctz, + c_undef, + c_va_arg, + c_va_copy, + c_va_end, + c_va_start, + div_exact, + div_floor, + div_trunc, + embed_file, + enum_to_int, + error_name, + error_return_trace, + error_to_int, + err_set_cast, + @"export", + @"extern", + fence, + field, + field_parent_ptr, + float_cast, + float_to_int, + frame, + Frame, + frame_address, + frame_size, + has_decl, + has_field, + import, + int_cast, + int_to_enum, + int_to_error, + int_to_float, + int_to_ptr, + max, + memcpy, + memset, + min, + wasm_memory_size, + wasm_memory_grow, + mod, + mul_with_overflow, + panic, + pop_count, + prefetch, + ptr_cast, + ptr_to_int, + rem, + return_address, + select, + set_align_stack, + set_cold, + set_eval_branch_quota, + set_float_mode, + set_runtime_safety, + shl_exact, + shl_with_overflow, + shr_exact, + shuffle, + size_of, + splat, + reduce, + src, + sqrt, + sin, + cos, + tan, + exp, + exp2, + log, + log2, + log10, + fabs, + floor, + ceil, + trunc, + round, + sub_with_overflow, + tag_name, + This, + trap, + truncate, + Type, + type_info, + type_name, + TypeOf, + union_init, + Vector, + volatile_cast, + work_item_id, + work_group_size, + work_group_id, +}; + +pub const MemLocRequirement = enum { + /// The builtin never needs a memory location. + never, + /// The builtin always needs a memory location. + always, + /// The builtin forwards the question to argument at index 1. + forward1, +}; + +pub const EvalToError = enum { + /// The builtin cannot possibly evaluate to an error. + never, + /// The builtin will always evaluate to an error. + always, + /// The builtin may or may not evaluate to an error depending on the parameters. + maybe, +}; + +tag: Tag, + +/// Info about the builtin call's ability to take advantage of a result location pointer. +needs_mem_loc: MemLocRequirement = .never, +/// Info about the builtin call's possibility of returning an error. +eval_to_error: EvalToError = .never, +/// `true` if the builtin call can be the left-hand side of an expression (assigned to). +allows_lvalue: bool = false, +/// The number of parameters to this builtin function. `null` means variable number +/// of parameters. 
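+/// For example, `@compileLog` and `@TypeOf` accept any number of arguments, so
+/// their entries in `list` below use `null` here.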
+param_count: ?u8, + +pub const list = list: { + @setEvalBranchQuota(3000); + break :list std.ComptimeStringMap(@This(), .{ + .{ + "@addWithOverflow", + .{ + .tag = .add_with_overflow, + .param_count = 2, + }, + }, + .{ + "@addrSpaceCast", + .{ + .tag = .addrspace_cast, + .param_count = 2, + }, + }, + .{ + "@alignCast", + .{ + .tag = .align_cast, + .param_count = 2, + }, + }, + .{ + "@alignOf", + .{ + .tag = .align_of, + .param_count = 1, + }, + }, + .{ + "@as", + .{ + .tag = .as, + .needs_mem_loc = .forward1, + .eval_to_error = .maybe, + .param_count = 2, + }, + }, + .{ + "@asyncCall", + .{ + .tag = .async_call, + .param_count = 4, + }, + }, + .{ + "@atomicLoad", + .{ + .tag = .atomic_load, + .param_count = 3, + }, + }, + .{ + "@atomicRmw", + .{ + .tag = .atomic_rmw, + .param_count = 5, + }, + }, + .{ + "@atomicStore", + .{ + .tag = .atomic_store, + .param_count = 4, + }, + }, + .{ + "@bitCast", + .{ + .tag = .bit_cast, + .needs_mem_loc = .forward1, + .param_count = 2, + }, + }, + .{ + "@bitOffsetOf", + .{ + .tag = .bit_offset_of, + .param_count = 2, + }, + }, + .{ + "@boolToInt", + .{ + .tag = .bool_to_int, + .param_count = 1, + }, + }, + .{ + "@bitSizeOf", + .{ + .tag = .bit_size_of, + .param_count = 1, + }, + }, + .{ + "@breakpoint", + .{ + .tag = .breakpoint, + .param_count = 0, + }, + }, + .{ + "@mulAdd", + .{ + .tag = .mul_add, + .param_count = 4, + }, + }, + .{ + "@byteSwap", + .{ + .tag = .byte_swap, + .param_count = 1, + }, + }, + .{ + "@bitReverse", + .{ + .tag = .bit_reverse, + .param_count = 1, + }, + }, + .{ + "@offsetOf", + .{ + .tag = .offset_of, + .param_count = 2, + }, + }, + .{ + "@call", + .{ + .tag = .call, + .needs_mem_loc = .always, + .eval_to_error = .maybe, + .param_count = 3, + }, + }, + .{ + "@cDefine", + .{ + .tag = .c_define, + .param_count = 2, + }, + }, + .{ + "@cImport", + .{ + .tag = .c_import, + .param_count = 1, + }, + }, + .{ + "@cInclude", + .{ + .tag = .c_include, + .param_count = 1, + }, + }, + .{ + "@clz", + .{ + .tag = .clz, + .param_count = 1, + }, + }, + .{ + "@cmpxchgStrong", + .{ + .tag = .cmpxchg_strong, + .param_count = 6, + }, + }, + .{ + "@cmpxchgWeak", + .{ + .tag = .cmpxchg_weak, + .param_count = 6, + }, + }, + .{ + "@compileError", + .{ + .tag = .compile_error, + .param_count = 1, + }, + }, + .{ + "@compileLog", + .{ + .tag = .compile_log, + .param_count = null, + }, + }, + .{ + "@constCast", + .{ + .tag = .const_cast, + .param_count = 1, + }, + }, + .{ + "@ctz", + .{ + .tag = .ctz, + .param_count = 1, + }, + }, + .{ + "@cUndef", + .{ + .tag = .c_undef, + .param_count = 1, + }, + }, + .{ + "@cVaArg", .{ + .tag = .c_va_arg, + .param_count = 2, + }, + }, + .{ + "@cVaCopy", .{ + .tag = .c_va_copy, + .param_count = 1, + }, + }, + .{ + "@cVaEnd", .{ + .tag = .c_va_end, + .param_count = 1, + }, + }, + .{ + "@cVaStart", .{ + .tag = .c_va_start, + .param_count = 0, + }, + }, + .{ + "@divExact", + .{ + .tag = .div_exact, + .param_count = 2, + }, + }, + .{ + "@divFloor", + .{ + .tag = .div_floor, + .param_count = 2, + }, + }, + .{ + "@divTrunc", + .{ + .tag = .div_trunc, + .param_count = 2, + }, + }, + .{ + "@embedFile", + .{ + .tag = .embed_file, + .param_count = 1, + }, + }, + .{ + "@enumToInt", + .{ + .tag = .enum_to_int, + .param_count = 1, + }, + }, + .{ + "@errorName", + .{ + .tag = .error_name, + .param_count = 1, + }, + }, + .{ + "@errorReturnTrace", + .{ + .tag = .error_return_trace, + .param_count = 0, + }, + }, + .{ + "@errorToInt", + .{ + .tag = .error_to_int, + .param_count = 1, + }, + }, + .{ + "@errSetCast", + .{ + .tag = 
.err_set_cast, + .eval_to_error = .always, + .param_count = 2, + }, + }, + .{ + "@export", + .{ + .tag = .@"export", + .param_count = 2, + }, + }, + .{ + "@extern", + .{ + .tag = .@"extern", + .param_count = 2, + }, + }, + .{ + "@fence", + .{ + .tag = .fence, + .param_count = 1, + }, + }, + .{ + "@field", + .{ + .tag = .field, + .needs_mem_loc = .always, + .eval_to_error = .maybe, + .param_count = 2, + .allows_lvalue = true, + }, + }, + .{ + "@fieldParentPtr", + .{ + .tag = .field_parent_ptr, + .param_count = 3, + }, + }, + .{ + "@floatCast", + .{ + .tag = .float_cast, + .param_count = 2, + }, + }, + .{ + "@floatToInt", + .{ + .tag = .float_to_int, + .param_count = 2, + }, + }, + .{ + "@frame", + .{ + .tag = .frame, + .param_count = 0, + }, + }, + .{ + "@Frame", + .{ + .tag = .Frame, + .param_count = 1, + }, + }, + .{ + "@frameAddress", + .{ + .tag = .frame_address, + .param_count = 0, + }, + }, + .{ + "@frameSize", + .{ + .tag = .frame_size, + .param_count = 1, + }, + }, + .{ + "@hasDecl", + .{ + .tag = .has_decl, + .param_count = 2, + }, + }, + .{ + "@hasField", + .{ + .tag = .has_field, + .param_count = 2, + }, + }, + .{ + "@import", + .{ + .tag = .import, + .param_count = 1, + }, + }, + .{ + "@intCast", + .{ + .tag = .int_cast, + .param_count = 2, + }, + }, + .{ + "@intToEnum", + .{ + .tag = .int_to_enum, + .param_count = 2, + }, + }, + .{ + "@intToError", + .{ + .tag = .int_to_error, + .eval_to_error = .always, + .param_count = 1, + }, + }, + .{ + "@intToFloat", + .{ + .tag = .int_to_float, + .param_count = 2, + }, + }, + .{ + "@intToPtr", + .{ + .tag = .int_to_ptr, + .param_count = 2, + }, + }, + .{ + "@max", + .{ + .tag = .max, + .param_count = 2, + }, + }, + .{ + "@memcpy", + .{ + .tag = .memcpy, + .param_count = 3, + }, + }, + .{ + "@memset", + .{ + .tag = .memset, + .param_count = 3, + }, + }, + .{ + "@min", + .{ + .tag = .min, + .param_count = 2, + }, + }, + .{ + "@wasmMemorySize", + .{ + .tag = .wasm_memory_size, + .param_count = 1, + }, + }, + .{ + "@wasmMemoryGrow", + .{ + .tag = .wasm_memory_grow, + .param_count = 2, + }, + }, + .{ + "@mod", + .{ + .tag = .mod, + .param_count = 2, + }, + }, + .{ + "@mulWithOverflow", + .{ + .tag = .mul_with_overflow, + .param_count = 2, + }, + }, + .{ + "@panic", + .{ + .tag = .panic, + .param_count = 1, + }, + }, + .{ + "@popCount", + .{ + .tag = .pop_count, + .param_count = 1, + }, + }, + .{ + "@prefetch", + .{ + .tag = .prefetch, + .param_count = 2, + }, + }, + .{ + "@ptrCast", + .{ + .tag = .ptr_cast, + .param_count = 2, + }, + }, + .{ + "@ptrToInt", + .{ + .tag = .ptr_to_int, + .param_count = 1, + }, + }, + .{ + "@rem", + .{ + .tag = .rem, + .param_count = 2, + }, + }, + .{ + "@returnAddress", + .{ + .tag = .return_address, + .param_count = 0, + }, + }, + .{ + "@select", + .{ + .tag = .select, + .param_count = 4, + }, + }, + .{ + "@setAlignStack", + .{ + .tag = .set_align_stack, + .param_count = 1, + }, + }, + .{ + "@setCold", + .{ + .tag = .set_cold, + .param_count = 1, + }, + }, + .{ + "@setEvalBranchQuota", + .{ + .tag = .set_eval_branch_quota, + .param_count = 1, + }, + }, + .{ + "@setFloatMode", + .{ + .tag = .set_float_mode, + .param_count = 1, + }, + }, + .{ + "@setRuntimeSafety", + .{ + .tag = .set_runtime_safety, + .param_count = 1, + }, + }, + .{ + "@shlExact", + .{ + .tag = .shl_exact, + .param_count = 2, + }, + }, + .{ + "@shlWithOverflow", + .{ + .tag = .shl_with_overflow, + .param_count = 2, + }, + }, + .{ + "@shrExact", + .{ + .tag = .shr_exact, + .param_count = 2, + }, + }, + .{ + "@shuffle", + .{ + .tag = .shuffle, + 
.param_count = 4, + }, + }, + .{ + "@sizeOf", + .{ + .tag = .size_of, + .param_count = 1, + }, + }, + .{ + "@splat", + .{ + .tag = .splat, + .param_count = 2, + }, + }, + .{ + "@reduce", + .{ + .tag = .reduce, + .param_count = 2, + }, + }, + .{ + "@src", + .{ + .tag = .src, + .needs_mem_loc = .always, + .param_count = 0, + }, + }, + .{ + "@sqrt", + .{ + .tag = .sqrt, + .param_count = 1, + }, + }, + .{ + "@sin", + .{ + .tag = .sin, + .param_count = 1, + }, + }, + .{ + "@cos", + .{ + .tag = .cos, + .param_count = 1, + }, + }, + .{ + "@tan", + .{ + .tag = .tan, + .param_count = 1, + }, + }, + .{ + "@exp", + .{ + .tag = .exp, + .param_count = 1, + }, + }, + .{ + "@exp2", + .{ + .tag = .exp2, + .param_count = 1, + }, + }, + .{ + "@log", + .{ + .tag = .log, + .param_count = 1, + }, + }, + .{ + "@log2", + .{ + .tag = .log2, + .param_count = 1, + }, + }, + .{ + "@log10", + .{ + .tag = .log10, + .param_count = 1, + }, + }, + .{ + "@fabs", + .{ + .tag = .fabs, + .param_count = 1, + }, + }, + .{ + "@floor", + .{ + .tag = .floor, + .param_count = 1, + }, + }, + .{ + "@ceil", + .{ + .tag = .ceil, + .param_count = 1, + }, + }, + .{ + "@trunc", + .{ + .tag = .trunc, + .param_count = 1, + }, + }, + .{ + "@round", + .{ + .tag = .round, + .param_count = 1, + }, + }, + .{ + "@subWithOverflow", + .{ + .tag = .sub_with_overflow, + .param_count = 2, + }, + }, + .{ + "@tagName", + .{ + .tag = .tag_name, + .param_count = 1, + }, + }, + .{ + "@This", + .{ + .tag = .This, + .param_count = 0, + }, + }, + .{ + "@trap", + .{ + .tag = .trap, + .param_count = 0, + }, + }, + .{ + "@truncate", + .{ + .tag = .truncate, + .param_count = 2, + }, + }, + .{ + "@Type", + .{ + .tag = .Type, + .param_count = 1, + }, + }, + .{ + "@typeInfo", + .{ + .tag = .type_info, + .param_count = 1, + }, + }, + .{ + "@typeName", + .{ + .tag = .type_name, + .param_count = 1, + }, + }, + .{ + "@TypeOf", + .{ + .tag = .TypeOf, + .param_count = null, + }, + }, + .{ + "@unionInit", + .{ + .tag = .union_init, + .needs_mem_loc = .always, + .param_count = 3, + }, + }, + .{ + "@Vector", + .{ + .tag = .Vector, + .param_count = 2, + }, + }, + .{ + "@volatileCast", + .{ + .tag = .volatile_cast, + .param_count = 1, + }, + }, + .{ + "@workItemId", .{ + .tag = .work_item_id, + .param_count = 1, + }, + }, + .{ + "@workGroupSize", + .{ + .tag = .work_group_size, + .param_count = 1, + }, + }, + .{ + "@workGroupId", + .{ + .tag = .work_group_id, + .param_count = 1, + }, + }, + }); +}; diff --git a/src/stage2/LICENSE b/src/stage2/LICENSE new file mode 100644 index 0000000..c8e2b0f --- /dev/null +++ b/src/stage2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (Expat) + +Copyright (c) 2015-2023, Zig contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/src/stage2/Module.zig b/src/stage2/Module.zig new file mode 100644 index 0000000..e9e4aff --- /dev/null +++ b/src/stage2/Module.zig @@ -0,0 +1,960 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const log = std.log.scoped(.module); +const Ast = std.zig.Ast; + +const Module = @This(); +const DocumentStore = @import("../DocumentStore.zig"); +const Handle = DocumentStore.Handle; + +/// Canonical reference to a position within a source file. +pub const SrcLoc = struct { + handle: *Handle, + /// Might be 0 depending on tag of `lazy`. + parent_decl_node: Ast.Node.Index, + /// Relative to `parent_decl_node`. + lazy: LazySrcLoc, + + pub fn declSrcToken(src_loc: SrcLoc) Ast.TokenIndex { + const tree = src_loc.handle.tree; + return tree.firstToken(src_loc.parent_decl_node); + } + + pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.TokenIndex { + return @bitCast(Ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node)); + } + + pub const Span = struct { + start: u32, + end: u32, + main: u32, + }; + + pub fn span(src_loc: SrcLoc) Span { + switch (src_loc.lazy) { + .unneeded => unreachable, + .entire_file => return Span{ .start = 0, .end = 1, .main = 0 }, + + .byte_abs => |byte_index| return Span{ .start = byte_index, .end = byte_index + 1, .main = byte_index }, + + .token_abs => |tok_index| { + const tree = src_loc.handle.tree; + const start = tree.tokens.items(.start)[tok_index]; + const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + return Span{ .start = start, .end = end, .main = start }; + }, + .node_abs => |node| { + const tree = src_loc.handle.tree; + return nodeToSpan(tree, node); + }, + .byte_offset => |byte_off| { + const tree = src_loc.handle.tree; + const tok_index = src_loc.declSrcToken(); + const start = tree.tokens.items(.start)[tok_index] + byte_off; + const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + return Span{ .start = start, .end = end, .main = start }; + }, + .token_offset => |tok_off| { + const tree = src_loc.handle.tree; + const tok_index = src_loc.declSrcToken() + tok_off; + const start = tree.tokens.items(.start)[tok_index]; + const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + return Span{ .start = start, .end = end, .main = start }; + }, + .node_offset => |traced_off| { + const node_off = traced_off.x; + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + assert(src_loc.handle.tree_loaded); + return nodeToSpan(tree, node); + }, + .node_offset_main_token => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const main_token = tree.nodes.items(.main_token)[node]; + return tokensToSpan(tree, main_token, main_token, main_token); + }, + .node_offset_bin_op => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + assert(src_loc.handle.tree_loaded); + return nodeToSpan(tree, node); + }, + .node_offset_initializer => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + return tokensToSpan( + tree, + tree.firstToken(node) - 3, + 
tree.lastToken(node), + tree.nodes.items(.main_token)[node] - 2, + ); + }, + .node_offset_var_decl_ty => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const node_tags = tree.nodes.items(.tag); + const full = switch (node_tags[node]) { + .global_var_decl, + .local_var_decl, + .simple_var_decl, + .aligned_var_decl, + => tree.fullVarDecl(node).?, + .@"usingnamespace" => { + const node_data = tree.nodes.items(.data); + return nodeToSpan(tree, node_data[node].lhs); + }, + else => unreachable, + }; + if (full.ast.type_node != 0) { + return nodeToSpan(tree, full.ast.type_node); + } + const tok_index = full.ast.mut_token + 1; // the name token + const start = tree.tokens.items(.start)[tok_index]; + const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + return Span{ .start = start, .end = end, .main = start }; + }, + .node_offset_var_decl_align => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const full = tree.fullVarDecl(node).?; + return nodeToSpan(tree, full.ast.align_node); + }, + .node_offset_var_decl_section => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const full = tree.fullVarDecl(node).?; + return nodeToSpan(tree, full.ast.section_node); + }, + .node_offset_var_decl_addrspace => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const full = tree.fullVarDecl(node).?; + return nodeToSpan(tree, full.ast.addrspace_node); + }, + .node_offset_var_decl_init => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const full = tree.fullVarDecl(node).?; + return nodeToSpan(tree, full.ast.init_node); + }, + .node_offset_builtin_call_arg0 => |n| return src_loc.byteOffsetBuiltinCallArg(n, 0), + .node_offset_builtin_call_arg1 => |n| return src_loc.byteOffsetBuiltinCallArg(n, 1), + .node_offset_builtin_call_arg2 => |n| return src_loc.byteOffsetBuiltinCallArg(n, 2), + .node_offset_builtin_call_arg3 => |n| return src_loc.byteOffsetBuiltinCallArg(n, 3), + .node_offset_builtin_call_arg4 => |n| return src_loc.byteOffsetBuiltinCallArg(n, 4), + .node_offset_builtin_call_arg5 => |n| return src_loc.byteOffsetBuiltinCallArg(n, 5), + .node_offset_array_access_index => |node_off| { + const tree = src_loc.handle.tree; + const node_datas = tree.nodes.items(.data); + const node = src_loc.declRelativeToNodeIndex(node_off); + return nodeToSpan(tree, node_datas[node].rhs); + }, + .node_offset_slice_ptr, + .node_offset_slice_start, + .node_offset_slice_end, + .node_offset_slice_sentinel, + => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const full = tree.fullSlice(node).?; + const part_node = switch (src_loc.lazy) { + .node_offset_slice_ptr => full.ast.sliced, + .node_offset_slice_start => full.ast.start, + .node_offset_slice_end => full.ast.end, + .node_offset_slice_sentinel => full.ast.sentinel, + else => unreachable, + }; + return nodeToSpan(tree, part_node); + }, + .node_offset_call_func => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullCall(&buf, node).?; + return nodeToSpan(tree, full.ast.fn_expr); + }, + .node_offset_field_name => |node_off| { + const tree = src_loc.handle.tree; + const node_datas = 
tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const node = src_loc.declRelativeToNodeIndex(node_off); + const tok_index = switch (node_tags[node]) { + .field_access => node_datas[node].rhs, + else => tree.firstToken(node) - 2, + }; + const start = tree.tokens.items(.start)[tok_index]; + const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + return Span{ .start = start, .end = end, .main = start }; + }, + .node_offset_deref_ptr => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + return nodeToSpan(tree, node); + }, + .node_offset_asm_source => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const full = tree.fullAsm(node).?; + return nodeToSpan(tree, full.ast.template); + }, + .node_offset_asm_ret_ty => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const full = tree.fullAsm(node).?; + const asm_output = full.outputs[0]; + const node_datas = tree.nodes.items(.data); + return nodeToSpan(tree, node_datas[asm_output].lhs); + }, + + .node_offset_for_cond, .node_offset_if_cond => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const node_tags = tree.nodes.items(.tag); + const src_node = switch (node_tags[node]) { + .if_simple, + .@"if", + => tree.fullIf(node).?.ast.cond_expr, + + .while_simple, + .while_cont, + .@"while", + .for_simple, + .@"for", + => tree.fullWhile(node).?.ast.cond_expr, + + .@"orelse" => node, + .@"catch" => node, + else => unreachable, + }; + return nodeToSpan(tree, src_node); + }, + .node_offset_bin_lhs => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const node_datas = tree.nodes.items(.data); + return nodeToSpan(tree, node_datas[node].lhs); + }, + .node_offset_bin_rhs => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const node_datas = tree.nodes.items(.data); + return nodeToSpan(tree, node_datas[node].rhs); + }, + + .node_offset_switch_operand => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + const node_datas = tree.nodes.items(.data); + return nodeToSpan(tree, node_datas[node].lhs); + }, + + .node_offset_switch_special_prong => |node_off| { + const tree = src_loc.handle.tree; + const switch_node = src_loc.declRelativeToNodeIndex(node_off); + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange); + const case_nodes = tree.extra_data[extra.start..extra.end]; + for (case_nodes) |case_node| { + const case = tree.fullSwitchCase(case_node).?; + const is_special = (case.ast.values.len == 0) or + (case.ast.values.len == 1 and + node_tags[case.ast.values[0]] == .identifier and + std.mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_")); + if (!is_special) continue; + + return nodeToSpan(tree, case_node); + } else unreachable; + }, + + .node_offset_switch_range => |node_off| { + const tree = src_loc.handle.tree; + const switch_node = src_loc.declRelativeToNodeIndex(node_off); + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const main_tokens = tree.nodes.items(.main_token); + const extra = 
tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange); + const case_nodes = tree.extra_data[extra.start..extra.end]; + for (case_nodes) |case_node| { + const case = tree.fullSwitchCase(case_node).?; + const is_special = (case.ast.values.len == 0) or + (case.ast.values.len == 1 and + node_tags[case.ast.values[0]] == .identifier and + std.mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_")); + if (is_special) continue; + + for (case.ast.values) |item_node| { + if (node_tags[item_node] == .switch_range) { + return nodeToSpan(tree, item_node); + } + } + } else unreachable; + }, + .node_offset_switch_prong_capture => |node_off| { + const tree = src_loc.handle.tree; + const case_node = src_loc.declRelativeToNodeIndex(node_off); + const case = tree.fullSwitchCase(case_node).?; + const start_tok = case.payload_token.?; + const token_tags = tree.tokens.items(.tag); + const end_tok = switch (token_tags[start_tok]) { + .asterisk => start_tok + 1, + else => start_tok, + }; + const start = tree.tokens.items(.start)[start_tok]; + const end_start = tree.tokens.items(.start)[end_tok]; + const end = end_start + @intCast(u32, tree.tokenSlice(end_tok).len); + return Span{ .start = start, .end = end, .main = start }; + }, + .node_offset_fn_type_align => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, node).?; + return nodeToSpan(tree, full.ast.align_expr); + }, + .node_offset_fn_type_addrspace => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, node).?; + return nodeToSpan(tree, full.ast.addrspace_expr); + }, + .node_offset_fn_type_section => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, node).?; + return nodeToSpan(tree, full.ast.section_expr); + }, + .node_offset_fn_type_cc => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, node).?; + return nodeToSpan(tree, full.ast.callconv_expr); + }, + + .node_offset_fn_type_ret_ty => |node_off| { + const tree = src_loc.handle.tree; + const node = src_loc.declRelativeToNodeIndex(node_off); + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, node).?; + return nodeToSpan(tree, full.ast.return_type); + }, + .node_offset_param => |node_off| { + const tree = src_loc.handle.tree; + const token_tags = tree.tokens.items(.tag); + const node = src_loc.declRelativeToNodeIndex(node_off); + + var first_tok = tree.firstToken(node); + while (true) switch (token_tags[first_tok - 1]) { + .colon, .identifier, .keyword_comptime, .keyword_noalias => first_tok -= 1, + else => break, + }; + return tokensToSpan( + tree, + first_tok, + tree.lastToken(node), + first_tok, + ); + }, + .token_offset_param => |token_off| { + const tree = src_loc.handle.tree; + const token_tags = tree.tokens.items(.tag); + const main_token = tree.nodes.items(.main_token)[src_loc.parent_decl_node]; + const tok_index = @bitCast(Ast.TokenIndex, token_off + @bitCast(i32, main_token)); + + var first_tok = tok_index; + while (true) switch (token_tags[first_tok - 1]) { + .colon, .identifier, .keyword_comptime, 
.keyword_noalias => first_tok -= 1, + else => break, + }; + return tokensToSpan( + tree, + first_tok, + tok_index, + first_tok, + ); + }, + + .node_offset_anyframe_type => |node_off| { + const tree = src_loc.handle.tree; + const node_datas = tree.nodes.items(.data); + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + return nodeToSpan(tree, node_datas[parent_node].rhs); + }, + + .node_offset_lib_name => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, parent_node).?; + const tok_index = full.lib_name.?; + const start = tree.tokens.items(.start)[tok_index]; + const end = start + @intCast(u32, tree.tokenSlice(tok_index).len); + return Span{ .start = start, .end = end, .main = start }; + }, + + .node_offset_array_type_len => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full = tree.fullArrayType(parent_node).?; + return nodeToSpan(tree, full.ast.elem_count); + }, + .node_offset_array_type_sentinel => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full = tree.fullArrayType(parent_node).?; + return nodeToSpan(tree, full.ast.sentinel); + }, + .node_offset_array_type_elem => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full = tree.fullArrayType(parent_node).?; + return nodeToSpan(tree, full.ast.elem_type); + }, + .node_offset_un_op => |node_off| { + const tree = src_loc.handle.tree; + const node_datas = tree.nodes.items(.data); + const node = src_loc.declRelativeToNodeIndex(node_off); + + return nodeToSpan(tree, node_datas[node].lhs); + }, + .node_offset_ptr_elem => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full = tree.fullPtrType(parent_node).?; + return nodeToSpan(tree, full.ast.child_type); + }, + .node_offset_ptr_sentinel => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full = tree.fullPtrType(parent_node).?; + return nodeToSpan(tree, full.ast.sentinel); + }, + .node_offset_ptr_align => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full = tree.fullPtrType(parent_node).?; + return nodeToSpan(tree, full.ast.align_node); + }, + .node_offset_ptr_addrspace => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full = tree.fullPtrType(parent_node).?; + return nodeToSpan(tree, full.ast.addrspace_node); + }, + .node_offset_ptr_bitoffset => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full = tree.fullPtrType(parent_node).?; + return nodeToSpan(tree, full.ast.bit_range_start); + }, + .node_offset_ptr_hostsize => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full = tree.fullPtrType(parent_node).?; + return nodeToSpan(tree, full.ast.bit_range_end); + }, + .node_offset_container_tag => |node_off| { + const tree = src_loc.handle.tree; + const node_tags = tree.nodes.items(.tag); + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + switch 
(node_tags[parent_node]) { + .container_decl_arg, .container_decl_arg_trailing => { + const full = tree.containerDeclArg(parent_node); + return nodeToSpan(tree, full.ast.arg); + }, + .tagged_union_enum_tag, .tagged_union_enum_tag_trailing => { + const full = tree.taggedUnionEnumTag(parent_node); + + return tokensToSpan( + tree, + tree.firstToken(full.ast.arg) - 2, + tree.lastToken(full.ast.arg) + 1, + tree.nodes.items(.main_token)[full.ast.arg], + ); + }, + else => unreachable, + } + }, + .node_offset_field_default => |node_off| { + const tree = src_loc.handle.tree; + const node_tags = tree.nodes.items(.tag); + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + const full: Ast.full.ContainerField = switch (node_tags[parent_node]) { + .container_field => tree.containerField(parent_node), + .container_field_init => tree.containerFieldInit(parent_node), + else => unreachable, + }; + return nodeToSpan(tree, full.ast.value_expr); + }, + .node_offset_init_ty => |node_off| { + const tree = src_loc.handle.tree; + const parent_node = src_loc.declRelativeToNodeIndex(node_off); + + var buf: [2]Ast.Node.Index = undefined; + const full = tree.fullArrayInit(&buf, parent_node).?; + return nodeToSpan(tree, full.ast.type_expr); + }, + .node_offset_store_ptr => |node_off| { + const tree = src_loc.handle.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const node = src_loc.declRelativeToNodeIndex(node_off); + + switch (node_tags[node]) { + .assign => { + return nodeToSpan(tree, node_datas[node].lhs); + }, + else => return nodeToSpan(tree, node), + } + }, + .node_offset_store_operand => |node_off| { + const tree = src_loc.handle.tree; + const node_tags = tree.nodes.items(.tag); + const node_datas = tree.nodes.items(.data); + const node = src_loc.declRelativeToNodeIndex(node_off); + + switch (node_tags[node]) { + .assign => { + return nodeToSpan(tree, node_datas[node].rhs); + }, + else => return nodeToSpan(tree, node), + } + }, + } + } + + pub fn byteOffsetBuiltinCallArg( + src_loc: SrcLoc, + node_off: i32, + arg_index: u32, + ) !Span { + const tree = src_loc.handle.tree; + const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); + const node = src_loc.declRelativeToNodeIndex(node_off); + const param = switch (node_tags[node]) { + .builtin_call_two, .builtin_call_two_comma => switch (arg_index) { + 0 => node_datas[node].lhs, + 1 => node_datas[node].rhs, + else => unreachable, + }, + .builtin_call, .builtin_call_comma => tree.extra_data[node_datas[node].lhs + arg_index], + else => unreachable, + }; + return nodeToSpan(tree, param); + } + + pub fn nodeToSpan(tree: *const Ast, node: u32) Span { + return tokensToSpan( + tree, + tree.firstToken(node), + tree.lastToken(node), + tree.nodes.items(.main_token)[node], + ); + } + + fn tokensToSpan(tree: *const Ast, start: Ast.TokenIndex, end: Ast.TokenIndex, main: Ast.TokenIndex) Span { + const token_starts = tree.tokens.items(.start); + var start_tok = start; + var end_tok = end; + + if (tree.tokensOnSameLine(start, end)) { + // do nothing + } else if (tree.tokensOnSameLine(start, main)) { + end_tok = main; + } else if (tree.tokensOnSameLine(main, end)) { + start_tok = main; + } else { + start_tok = main; + end_tok = main; + } + const start_off = token_starts[start_tok]; + const end_off = token_starts[end_tok] + @intCast(u32, tree.tokenSlice(end_tok).len); + return Span{ .start = start_off, .end = end_off, .main = token_starts[main] }; + } +}; + +/// Resolving a source 
location into a byte offset may require doing work +/// that we would rather not do unless the error actually occurs. +/// Therefore we need a data structure that contains the information necessary +/// to lazily produce a `SrcLoc` as required. +/// Most of the offsets in this data structure are relative to the containing Decl. +/// This makes the source location resolve properly even when a Decl gets +/// shifted up or down in the file, as long as the Decl's contents itself +/// do not change. +pub const LazySrcLoc = union(enum) { + /// When this tag is set, the code that constructed this `LazySrcLoc` is asserting + /// that all code paths which would need to resolve the source location are + /// unreachable. If you are debugging this tag incorrectly being this value, + /// look into using reverse-continue with a memory watchpoint to see where the + /// value is being set to this tag. + unneeded, + /// Means the source location points to an entire file; not any particular + /// location within the file. `handle` union field will be active. + entire_file, + /// The source location points to a byte offset within a source file, + /// offset from 0. The source file is determined contextually. + /// Inside a `SrcLoc`, the `handle` union field will be active. + byte_abs: u32, + /// The source location points to a token within a source file, + /// offset from 0. The source file is determined contextually. + /// Inside a `SrcLoc`, the `handle` union field will be active. + token_abs: u32, + /// The source location points to an AST node within a source file, + /// offset from 0. The source file is determined contextually. + /// Inside a `SrcLoc`, the `handle` union field will be active. + node_abs: u32, + /// The source location points to a byte offset within a source file, + /// offset from the byte offset of the Decl within the file. + /// The Decl is determined contextually. + byte_offset: u32, + /// This data is the offset into the token list from the Decl token. + /// The Decl is determined contextually. + token_offset: u32, + /// The source location points to an AST node, which is this value offset + /// from its containing Decl node AST index. + /// The Decl is determined contextually. + node_offset: i32, + /// The source location points to the main token of an AST node, found + /// by taking this AST node index offset from the containing Decl AST node. + /// The Decl is determined contextually. + node_offset_main_token: i32, + /// The source location points to the beginning of a struct initializer. + /// The Decl is determined contextually. + node_offset_initializer: i32, + /// The source location points to a variable declaration type expression, + /// found by taking this AST node index offset from the containing + /// Decl AST node, which points to a variable declaration AST node. Next, navigate + /// to the type expression. + /// The Decl is determined contextually. + node_offset_var_decl_ty: i32, + /// The source location points to the alignment expression of a var decl. + /// The Decl is determined contextually. + node_offset_var_decl_align: i32, + /// The source location points to the linksection expression of a var decl. + /// The Decl is determined contextually. + node_offset_var_decl_section: i32, + /// The source location points to the addrspace expression of a var decl. + /// The Decl is determined contextually. + node_offset_var_decl_addrspace: i32, + /// The source location points to the initializer of a var decl. + /// The Decl is determined contextually. 
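+    /// For example, in `const x = foo();` this refers to the `foo()` node.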
+ node_offset_var_decl_init: i32, + /// The source location points to a for loop condition expression, + /// found by taking this AST node index offset from the containing + /// Decl AST node, which points to a for loop AST node. Next, navigate + /// to the condition expression. + /// The Decl is determined contextually. + node_offset_for_cond: i32, + /// The source location points to the first parameter of a builtin + /// function call, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a builtin call AST node. Next, navigate + /// to the first parameter. + /// The Decl is determined contextually. + node_offset_builtin_call_arg0: i32, + /// Same as `node_offset_builtin_call_arg0` except arg index 1. + node_offset_builtin_call_arg1: i32, + node_offset_builtin_call_arg2: i32, + node_offset_builtin_call_arg3: i32, + node_offset_builtin_call_arg4: i32, + node_offset_builtin_call_arg5: i32, + /// The source location points to the index expression of an array access + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to an array access AST node. Next, navigate + /// to the index expression. + /// The Decl is determined contextually. + node_offset_array_access_index: i32, + /// The source location points to the LHS of a slice expression + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a slice AST node. Next, navigate + /// to the sentinel expression. + /// The Decl is determined contextually. + node_offset_slice_ptr: i32, + /// The source location points to start expression of a slice expression + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a slice AST node. Next, navigate + /// to the sentinel expression. + /// The Decl is determined contextually. + node_offset_slice_start: i32, + /// The source location points to the end expression of a slice + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a slice AST node. Next, navigate + /// to the sentinel expression. + /// The Decl is determined contextually. + node_offset_slice_end: i32, + /// The source location points to the sentinel expression of a slice + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a slice AST node. Next, navigate + /// to the sentinel expression. + /// The Decl is determined contextually. + node_offset_slice_sentinel: i32, + /// The source location points to the callee expression of a function + /// call expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a function call AST node. Next, navigate + /// to the callee expression. + /// The Decl is determined contextually. + node_offset_call_func: i32, + /// The payload is offset from the containing Decl AST node. + /// The source location points to the field name of: + /// * a field access expression (`a.b`), or + /// * the operand ("b" node) of a field initialization expression (`.a = b`) + /// The Decl is determined contextually. + node_offset_field_name: i32, + /// The source location points to the pointer of a pointer deref expression, + /// found by taking this AST node index offset from the containing + /// Decl AST node, which points to a pointer deref AST node. Next, navigate + /// to the pointer expression. 
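+    /// For example, the `p` in `p.*`.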
+ /// The Decl is determined contextually. + node_offset_deref_ptr: i32, + /// The source location points to the assembly source code of an inline assembly + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to inline assembly AST node. Next, navigate + /// to the asm template source code. + /// The Decl is determined contextually. + node_offset_asm_source: i32, + /// The source location points to the return type of an inline assembly + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to inline assembly AST node. Next, navigate + /// to the return type expression. + /// The Decl is determined contextually. + node_offset_asm_ret_ty: i32, + /// The source location points to the condition expression of an if + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to an if expression AST node. Next, navigate + /// to the condition expression. + /// The Decl is determined contextually. + node_offset_if_cond: i32, + /// The source location points to a binary expression, such as `a + b`, found + /// by taking this AST node index offset from the containing Decl AST node. + /// The Decl is determined contextually. + node_offset_bin_op: i32, + /// The source location points to the LHS of a binary expression, found + /// by taking this AST node index offset from the containing Decl AST node, + /// which points to a binary expression AST node. Next, navigate to the LHS. + /// The Decl is determined contextually. + node_offset_bin_lhs: i32, + /// The source location points to the RHS of a binary expression, found + /// by taking this AST node index offset from the containing Decl AST node, + /// which points to a binary expression AST node. Next, navigate to the RHS. + /// The Decl is determined contextually. + node_offset_bin_rhs: i32, + /// The source location points to the operand of a switch expression, found + /// by taking this AST node index offset from the containing Decl AST node, + /// which points to a switch expression AST node. Next, navigate to the operand. + /// The Decl is determined contextually. + node_offset_switch_operand: i32, + /// The source location points to the else/`_` prong of a switch expression, found + /// by taking this AST node index offset from the containing Decl AST node, + /// which points to a switch expression AST node. Next, navigate to the else/`_` prong. + /// The Decl is determined contextually. + node_offset_switch_special_prong: i32, + /// The source location points to all the ranges of a switch expression, found + /// by taking this AST node index offset from the containing Decl AST node, + /// which points to a switch expression AST node. Next, navigate to any of the + /// range nodes. The error applies to all of them. + /// The Decl is determined contextually. + node_offset_switch_range: i32, + /// The source location points to the capture of a switch_prong. + /// The Decl is determined contextually. + node_offset_switch_prong_capture: i32, + /// The source location points to the align expr of a function type + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a function type AST node. Next, navigate to + /// the calling convention node. + /// The Decl is determined contextually. 
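+    /// For example, the `4` in `fn f() align(4) void {}`.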
+ node_offset_fn_type_align: i32, + /// The source location points to the addrspace expr of a function type + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a function type AST node. Next, navigate to + /// the calling convention node. + /// The Decl is determined contextually. + node_offset_fn_type_addrspace: i32, + /// The source location points to the linksection expr of a function type + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a function type AST node. Next, navigate to + /// the calling convention node. + /// The Decl is determined contextually. + node_offset_fn_type_section: i32, + /// The source location points to the calling convention of a function type + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a function type AST node. Next, navigate to + /// the calling convention node. + /// The Decl is determined contextually. + node_offset_fn_type_cc: i32, + /// The source location points to the return type of a function type + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a function type AST node. Next, navigate to + /// the return type node. + /// The Decl is determined contextually. + node_offset_fn_type_ret_ty: i32, + node_offset_param: i32, + token_offset_param: i32, + /// The source location points to the type expression of an `anyframe->T` + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to a `anyframe->T` expression AST node. Next, navigate + /// to the type expression. + /// The Decl is determined contextually. + node_offset_anyframe_type: i32, + /// The source location points to the string literal of `extern "foo"`, found + /// by taking this AST node index offset from the containing + /// Decl AST node, which points to a function prototype or variable declaration + /// expression AST node. Next, navigate to the string literal of the `extern "foo"`. + /// The Decl is determined contextually. + node_offset_lib_name: i32, + /// The source location points to the len expression of an `[N:S]T` + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to an `[N:S]T` expression AST node. Next, navigate + /// to the len expression. + /// The Decl is determined contextually. + node_offset_array_type_len: i32, + /// The source location points to the sentinel expression of an `[N:S]T` + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to an `[N:S]T` expression AST node. Next, navigate + /// to the sentinel expression. + /// The Decl is determined contextually. + node_offset_array_type_sentinel: i32, + /// The source location points to the elem expression of an `[N:S]T` + /// expression, found by taking this AST node index offset from the containing + /// Decl AST node, which points to an `[N:S]T` expression AST node. Next, navigate + /// to the elem expression. + /// The Decl is determined contextually. + node_offset_array_type_elem: i32, + /// The source location points to the operand of an unary expression. + /// The Decl is determined contextually. + node_offset_un_op: i32, + /// The source location points to the elem type of a pointer. + /// The Decl is determined contextually. 
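+    /// For example, the `u8` in `*const u8`.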
+ node_offset_ptr_elem: i32, + /// The source location points to the sentinel of a pointer. + /// The Decl is determined contextually. + node_offset_ptr_sentinel: i32, + /// The source location points to the align expr of a pointer. + /// The Decl is determined contextually. + node_offset_ptr_align: i32, + /// The source location points to the addrspace expr of a pointer. + /// The Decl is determined contextually. + node_offset_ptr_addrspace: i32, + /// The source location points to the bit-offset of a pointer. + /// The Decl is determined contextually. + node_offset_ptr_bitoffset: i32, + /// The source location points to the host size of a pointer. + /// The Decl is determined contextually. + node_offset_ptr_hostsize: i32, + /// The source location points to the tag type of an union or an enum. + /// The Decl is determined contextually. + node_offset_container_tag: i32, + /// The source location points to the default value of a field. + /// The Decl is determined contextually. + node_offset_field_default: i32, + /// The source location points to the type of an array or struct initializer. + /// The Decl is determined contextually. + node_offset_init_ty: i32, + /// The source location points to the LHS of an assignment. + /// The Decl is determined contextually. + node_offset_store_ptr: i32, + /// The source location points to the RHS of an assignment. + /// The Decl is determined contextually. + node_offset_store_operand: i32, + + pub fn nodeOffset(node_offset: i32) LazySrcLoc { + return .{ .node_offset = node_offset }; + } + + pub fn toSrcLoc(lazy: LazySrcLoc, handle: *Handle, src_node: Ast.Node.Index) SrcLoc { + return switch (lazy) { + .unneeded, + .entire_file, + .byte_abs, + .token_abs, + .node_abs, + => .{ + .handle = handle, + .parent_decl_node = 0, + .lazy = lazy, + }, + + .byte_offset, + .token_offset, + .node_offset, + .node_offset_main_token, + .node_offset_initializer, + .node_offset_var_decl_ty, + .node_offset_var_decl_align, + .node_offset_var_decl_section, + .node_offset_var_decl_addrspace, + .node_offset_var_decl_init, + .node_offset_for_cond, + .node_offset_builtin_call_arg0, + .node_offset_builtin_call_arg1, + .node_offset_builtin_call_arg2, + .node_offset_builtin_call_arg3, + .node_offset_builtin_call_arg4, + .node_offset_builtin_call_arg5, + .node_offset_array_access_index, + .node_offset_slice_ptr, + .node_offset_slice_start, + .node_offset_slice_end, + .node_offset_slice_sentinel, + .node_offset_call_func, + .node_offset_field_name, + .node_offset_deref_ptr, + .node_offset_asm_source, + .node_offset_asm_ret_ty, + .node_offset_if_cond, + .node_offset_bin_op, + .node_offset_bin_lhs, + .node_offset_bin_rhs, + .node_offset_switch_operand, + .node_offset_switch_special_prong, + .node_offset_switch_range, + .node_offset_switch_prong_capture, + .node_offset_fn_type_align, + .node_offset_fn_type_addrspace, + .node_offset_fn_type_section, + .node_offset_fn_type_cc, + .node_offset_fn_type_ret_ty, + .node_offset_param, + .token_offset_param, + .node_offset_anyframe_type, + .node_offset_lib_name, + .node_offset_array_type_len, + .node_offset_array_type_sentinel, + .node_offset_array_type_elem, + .node_offset_un_op, + .node_offset_ptr_elem, + .node_offset_ptr_sentinel, + .node_offset_ptr_align, + .node_offset_ptr_addrspace, + .node_offset_ptr_bitoffset, + .node_offset_ptr_hostsize, + .node_offset_container_tag, + .node_offset_field_default, + .node_offset_init_ty, + .node_offset_store_ptr, + .node_offset_store_operand, + => .{ + .handle = handle, + .parent_decl_node = src_node, + 
.lazy = lazy, + }, + }; + } +}; diff --git a/src/stage2/Zir.zig b/src/stage2/Zir.zig new file mode 100644 index 0000000..e3cf276 --- /dev/null +++ b/src/stage2/Zir.zig @@ -0,0 +1,3855 @@ +//! Zig Intermediate Representation. Astgen.zig converts AST nodes to these +//! untyped IR instructions. Next, Sema.zig processes these into AIR. +//! The minimum amount of information needed to represent a list of ZIR instructions. +//! Once this structure is completed, it can be used to generate AIR, followed by +//! machine code, without any memory access into the AST tree token list, node list, +//! or source bytes. Exceptions include: +//! * Compile errors, which may need to reach into these data structures to +//! create a useful report. +//! * In the future, possibly inline assembly, which needs to get parsed and +//! handled by the codegen backend, and errors reported there. However for now, +//! inline assembly is not an exception. + +const std = @import("std"); +const builtin = @import("builtin"); +const mem = std.mem; +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; +const BigIntConst = std.math.big.int.Const; +const BigIntMutable = std.math.big.int.Mutable; +const Ast = std.zig.Ast; + +const Zir = @This(); +const Module = @import("Module.zig"); +const LazySrcLoc = Module.LazySrcLoc; + +instructions: std.MultiArrayList(Inst).Slice, +/// In order to store references to strings in fewer bytes, we copy all +/// string bytes into here. String bytes can be null. It is up to whomever +/// is referencing the data here whether they want to store both index and length, +/// thus allowing null bytes, or store only index, and use null-termination. The +/// `string_bytes` array is agnostic to either usage. +/// Indexes 0 and 1 are reserved for special cases. +string_bytes: []u8, +/// The meaning of this data is determined by `Inst.Tag` value. +/// The first few indexes are reserved. See `ExtraIndex` for the values. +extra: []u32, + +/// The data stored at byte offset 0 when ZIR is stored in a file. +pub const Header = extern struct { + instructions_len: u32, + string_bytes_len: u32, + extra_len: u32, + /// We could leave this as padding, however it triggers a Valgrind warning because + /// we read and write undefined bytes to the file system. This is harmless, but + /// it's essentially free to have a zero field here and makes the warning go away, + /// making it more likely that following Valgrind warnings will be taken seriously. + unused: u32 = 0, + stat_inode: std.fs.File.INode, + stat_size: u64, + stat_mtime: i128, +}; + +pub const ExtraIndex = enum(u32) { + /// If this is 0, no compile errors. Otherwise there is a `CompileErrors` + /// payload at this index. + compile_errors, + /// If this is 0, this file contains no imports. Otherwise there is a `Imports` + /// payload at this index. + imports, + + _, +}; + +/// Returns the requested data, as well as the new index which is at the start of the +/// trailers for the object. 
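+/// Illustrative use: for a `pl_node` instruction carrying a `Bin` payload,
+/// `code.extraData(Inst.Bin, payload_index)` returns the decoded operands in
+/// `.data` and the index of any trailing items in `.end`.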
+pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, end: usize } { + const fields = @typeInfo(T).Struct.fields; + var i: usize = index; + var result: T = undefined; + inline for (fields) |field| { + @field(result, field.name) = switch (field.type) { + u32 => code.extra[i], + Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]), + i32 => @bitCast(i32, code.extra[i]), + Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]), + Inst.BuiltinCall.Flags => @bitCast(Inst.BuiltinCall.Flags, code.extra[i]), + Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]), + Inst.FuncFancy.Bits => @bitCast(Inst.FuncFancy.Bits, code.extra[i]), + else => @compileError("bad field type"), + }; + i += 1; + } + return .{ + .data = result, + .end = i, + }; +} + +/// Given an index into `string_bytes` returns the null-terminated string found there. +pub fn nullTerminatedString(code: Zir, index: usize) [:0]const u8 { + var end: usize = index; + while (code.string_bytes[end] != 0) { + end += 1; + } + return code.string_bytes[index..end :0]; +} + +pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref { + const raw_slice = code.extra[start..][0..len]; + return @ptrCast([]Inst.Ref, raw_slice); +} + +pub fn hasCompileErrors(code: Zir) bool { + return code.extra[@enumToInt(ExtraIndex.compile_errors)] != 0; +} + +pub fn deinit(code: *Zir, gpa: Allocator) void { + code.instructions.deinit(gpa); + gpa.free(code.string_bytes); + gpa.free(code.extra); + code.* = undefined; +} + +/// ZIR is structured so that the outermost "main" struct of any file +/// is always at index 0. +pub const main_struct_inst: Inst.Index = 0; + +/// These are untyped instructions generated from an Abstract Syntax Tree. +/// The data here is immutable because it is possible to have multiple +/// analyses on the same ZIR happening at the same time. +pub const Inst = struct { + tag: Tag, + data: Data, + + /// These names are used directly as the instruction names in the text format. + /// See `data_field_map` for a list of which `Data` fields are used by each `Tag`. + pub const Tag = enum(u8) { + /// Arithmetic addition, asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + add, + /// Twos complement wrapping integer addition. + /// Uses the `pl_node` union field. Payload is `Bin`. + addwrap, + /// Saturating addition. + /// Uses the `pl_node` union field. Payload is `Bin`. + add_sat, + /// The same as `add` except no safety check. + add_unsafe, + /// Arithmetic subtraction. Asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + sub, + /// Twos complement wrapping integer subtraction. + /// Uses the `pl_node` union field. Payload is `Bin`. + subwrap, + /// Saturating subtraction. + /// Uses the `pl_node` union field. Payload is `Bin`. + sub_sat, + /// Arithmetic multiplication. Asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + mul, + /// Twos complement wrapping integer multiplication. + /// Uses the `pl_node` union field. Payload is `Bin`. + mulwrap, + /// Saturating multiplication. + /// Uses the `pl_node` union field. Payload is `Bin`. + mul_sat, + /// Implements the `@divExact` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + div_exact, + /// Implements the `@divFloor` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + div_floor, + /// Implements the `@divTrunc` builtin. + /// Uses the `pl_node` union field with payload `Bin`. 
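+        /// For example, `@divTrunc(-5, 3)` evaluates to `-1` (truncation toward zero).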
+ div_trunc, + /// Implements the `@mod` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + mod, + /// Implements the `@rem` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + rem, + /// Ambiguously remainder division or modulus. If the computation would possibly have + /// a different value depending on whether the operation is remainder division or modulus, + /// a compile error is emitted. Otherwise the computation is performed. + /// Uses the `pl_node` union field. Payload is `Bin`. + mod_rem, + /// Integer shift-left. Zeroes are shifted in from the right hand side. + /// Uses the `pl_node` union field. Payload is `Bin`. + shl, + /// Implements the `@shlExact` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + shl_exact, + /// Saturating shift-left. + /// Uses the `pl_node` union field. Payload is `Bin`. + shl_sat, + /// Integer shift-right. Arithmetic or logical depending on the signedness of + /// the integer type. + /// Uses the `pl_node` union field. Payload is `Bin`. + shr, + /// Implements the `@shrExact` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + shr_exact, + + /// Declares a parameter of the current function. Used for: + /// * debug info + /// * checking shadowing against declarations in the current namespace + /// * parameter type expressions referencing other parameters + /// These occur in the block outside a function body (the same block as + /// contains the func instruction). + /// Uses the `pl_tok` field. Token is the parameter name, payload is a `Param`. + param, + /// Same as `param` except the parameter is marked comptime. + param_comptime, + /// Same as `param` except the parameter is marked anytype. + /// Uses the `str_tok` field. Token is the parameter name. String is the parameter name. + param_anytype, + /// Same as `param` except the parameter is marked both comptime and anytype. + /// Uses the `str_tok` field. Token is the parameter name. String is the parameter name. + param_anytype_comptime, + /// Array concatenation. `a ++ b` + /// Uses the `pl_node` union field. Payload is `Bin`. + array_cat, + /// Array multiplication `a ** b` + /// Uses the `pl_node` union field. Payload is `Bin`. + array_mul, + /// `[N]T` syntax. No source location provided. + /// Uses the `pl_node` union field. Payload is `Bin`. lhs is length, rhs is element type. + array_type, + /// `[N:S]T` syntax. Source location is the array type expression node. + /// Uses the `pl_node` union field. Payload is `ArrayTypeSentinel`. + array_type_sentinel, + /// `@Vector` builtin. + /// Uses the `pl_node` union field with `Bin` payload. + /// lhs is length, rhs is element type. + vector_type, + /// Given an indexable type, returns the type of the element at given index. + /// Uses the `bin` union field. lhs is the indexable type, rhs is the index. + elem_type_index, + /// Given a pointer to an indexable object, returns the len property. This is + /// used by for loops. This instruction also emits a for-loop specific compile + /// error if the indexable object is not indexable. + /// Uses the `un_node` field. The AST node is the for loop node. + indexable_ptr_len, + /// Create a `anyframe->T` type. + /// Uses the `un_node` field. + anyframe_type, + /// Type coercion. No source location attached. + /// Uses the `bin` field. + as, + /// Type coercion to the function's return type. + /// Uses the `pl_node` field. Payload is `As`. AST node could be many things. 
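+        // Rough illustration of the coercion instructions: an annotated local such as
+        //
+        //     const x: u8 = y;
+        //
+        // involves, among other things, a coercion of `%y` to `u8`, conceptually
+        //
+        //     %n = as_node(u8_type, %y)
+        //
+        // Hypothetical rendering; `as_node` carries its operands through an `As`
+        // payload rather than inline, as described above.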
+ as_node, + /// Same as `as_node` but ignores runtime to comptime int error. + as_shift_operand, + /// Bitwise AND. `&` + bit_and, + /// Reinterpret the memory representation of a value as a different type. + /// Uses the pl_node field with payload `Bin`. + bitcast, + /// Bitwise NOT. `~` + /// Uses `un_tok`. + bit_not, + /// Bitwise OR. `|` + bit_or, + /// A labeled block of code, which can return a value. + /// Uses the `pl_node` union field. Payload is `Block`. + block, + /// A list of instructions which are analyzed in the parent context, without + /// generating a runtime block. Must terminate with an "inline" variant of + /// a noreturn instruction. + /// Uses the `pl_node` union field. Payload is `Block`. + block_inline, + /// Implements `suspend {...}`. + /// Uses the `pl_node` union field. Payload is `Block`. + suspend_block, + /// Boolean NOT. See also `bit_not`. + /// Uses the `un_tok` field. + bool_not, + /// Short-circuiting boolean `and`. `lhs` is a boolean `Ref` and the other operand + /// is a block, which is evaluated if `lhs` is `true`. + /// Uses the `bool_br` union field. + bool_br_and, + /// Short-circuiting boolean `or`. `lhs` is a boolean `Ref` and the other operand + /// is a block, which is evaluated if `lhs` is `false`. + /// Uses the `bool_br` union field. + bool_br_or, + /// Return a value from a block. + /// Uses the `break` union field. + /// Uses the source information from previous instruction. + @"break", + /// Return a value from a block. This instruction is used as the terminator + /// of a `block_inline`. It allows using the return value from `Sema.analyzeBody`. + /// This instruction may also be used when it is known that there is only one + /// break instruction in a block, and the target block is the parent. + /// Uses the `break` union field. + break_inline, + /// Checks that comptime control flow does not happen inside a runtime block. + /// Uses the `un_node` union field. + check_comptime_control_flow, + /// Function call. + /// Uses the `pl_node` union field with payload `Call`. + /// AST node is the function call. + call, + /// Implements the `@call` builtin. + /// Uses the `pl_node` union field with payload `BuiltinCall`. + /// AST node is the builtin call. + builtin_call, + /// `<` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_lt, + /// `<=` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_lte, + /// `==` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_eq, + /// `>=` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_gte, + /// `>` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_gt, + /// `!=` + /// Uses the `pl_node` union field. Payload is `Bin`. + cmp_neq, + /// Coerces a result location pointer to a new element type. It is evaluated "backwards"- + /// as type coercion from the new element type to the old element type. + /// Uses the `pl_node` union field. Payload is `Bin`. + /// LHS is destination element type, RHS is result pointer. + coerce_result_ptr, + /// Conditional branch. Splits control flow based on a boolean condition value. + /// Uses the `pl_node` union field. AST node is an if, while, for, etc. + /// Payload is `CondBr`. + condbr, + /// Same as `condbr`, except the condition is coerced to a comptime value, and + /// only the taken branch is analyzed. The then block and else block must + /// terminate with an "inline" variant of a noreturn instruction. + condbr_inline, + /// Given an operand which is an error union, splits control flow. 
In + /// case of error, control flow goes into the block that is part of this + /// instruction, which is guaranteed to end with a return instruction + /// and never breaks out of the block. + /// In the case of non-error, control flow proceeds to the next instruction + /// after the `try`, with the result of this instruction being the unwrapped + /// payload value, as if `err_union_payload_unsafe` was executed on the operand. + /// Uses the `pl_node` union field. Payload is `Try`. + @"try", + ///// Same as `try` except the operand is coerced to a comptime value, and + ///// only the taken branch is analyzed. The block must terminate with an "inline" + ///// variant of a noreturn instruction. + //try_inline, + /// Same as `try` except the operand is a pointer and the result is a pointer. + try_ptr, + ///// Same as `try_inline` except the operand is a pointer and the result is a pointer. + //try_ptr_inline, + /// An error set type definition. Contains a list of field names. + /// Uses the `pl_node` union field. Payload is `ErrorSetDecl`. + error_set_decl, + error_set_decl_anon, + error_set_decl_func, + /// Declares the beginning of a statement. Used for debug info. + /// Uses the `dbg_stmt` union field. The line and column are offset + /// from the parent declaration. + dbg_stmt, + /// Marks a variable declaration. Used for debug info. + /// Uses the `str_op` union field. The string is the local variable name, + /// and the operand is the pointer to the variable's location. The local + /// may be a const or a var. + dbg_var_ptr, + /// Same as `dbg_var_ptr` but the local is always a const and the operand + /// is the local's value. + dbg_var_val, + /// Marks the beginning of a semantic scope for debug info variables. + dbg_block_begin, + /// Marks the end of a semantic scope for debug info variables. + dbg_block_end, + /// Uses a name to identify a Decl and takes a pointer to it. + /// Uses the `str_tok` union field. + decl_ref, + /// Uses a name to identify a Decl and uses it as a value. + /// Uses the `str_tok` union field. + decl_val, + /// Load the value from a pointer. Assumes `x.*` syntax. + /// Uses `un_node` field. AST node is the `x.*` syntax. + load, + /// Arithmetic division. Asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + div, + /// Given a pointer to an array, slice, or pointer, returns a pointer to the element at + /// the provided index. + /// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`. + elem_ptr_node, + /// Same as `elem_ptr_node` but used only for for loop. + /// Uses the `pl_node` union field. AST node is the condition of a for loop. + /// Payload is `Bin`. + /// No OOB safety check is emitted. + elem_ptr, + /// Same as `elem_ptr_node` except the index is stored immediately rather than + /// as a reference to another ZIR instruction. + /// Uses the `pl_node` union field. AST node is an element inside array initialization + /// syntax. Payload is `ElemPtrImm`. + /// This instruction has a way to set the result type to be a + /// single-pointer or a many-pointer. + elem_ptr_imm, + /// Given an array, slice, or pointer, returns the element at the provided index. + /// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`. + elem_val_node, + /// Same as `elem_val_node` but used only for for loop. + /// Uses the `pl_node` union field. AST node is the condition of a for loop. + /// Payload is `Bin`. + /// No OOB safety check is emitted. 
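+        // Illustrative sketch of `try`: `const x = try foo();` lowers to something
+        // along the lines of
+        //
+        //     %1 = call(...)                     // foo()
+        //     %2 = try(%1, { ...block ending in a ret instruction for the error case... })
+        //     // fall-through: %2 is the unwrapped payload of %1
+        //
+        // Hypothetical rendering; the exact instructions inside the error block differ,
+        // but the shape matches the description of `try` above.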
+ elem_val, + /// Emits a compile error if the operand is not `void`. + /// Uses the `un_node` field. + ensure_result_used, + /// Emits a compile error if an error is ignored. + /// Uses the `un_node` field. + ensure_result_non_error, + /// Emits a compile error error union payload is not void. + ensure_err_union_payload_void, + /// Create a `E!T` type. + /// Uses the `pl_node` field with `Bin` payload. + error_union_type, + /// `error.Foo` syntax. Uses the `str_tok` field of the Data union. + error_value, + /// Implements the `@export` builtin function, based on either an identifier to a Decl, + /// or field access of a Decl. The thing being exported is the Decl. + /// Uses the `pl_node` union field. Payload is `Export`. + @"export", + /// Implements the `@export` builtin function, based on a comptime-known value. + /// The thing being exported is the comptime-known value which is the operand. + /// Uses the `pl_node` union field. Payload is `ExportValue`. + export_value, + /// Given a pointer to a struct or object that contains virtual fields, returns a pointer + /// to the named field. The field name is stored in string_bytes. Used by a.b syntax. + /// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field. + field_ptr, + /// Same as `field_ptr` but used for struct init. + field_ptr_init, + /// Given a struct or object that contains virtual fields, returns the named field. + /// The field name is stored in string_bytes. Used by a.b syntax. + /// This instruction also accepts a pointer. + /// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field. + field_val, + /// Given a pointer to a struct or object that contains virtual fields, returns the + /// named field. If there is no named field, searches in the type for a decl that + /// matches the field name. The decl is resolved and we ensure that it's a function + /// which can accept the object as the first parameter, with one pointer fixup. If + /// all of that works, this instruction produces a special "bound function" value + /// which contains both the function and the saved first parameter value. + /// Bound functions may only be used as the function parameter to a `call` or + /// `builtin_call` instruction. Any other use is invalid zir and may crash the compiler. + field_call_bind, + /// Given a pointer to a struct or object that contains virtual fields, returns a pointer + /// to the named field. The field name is a comptime instruction. Used by @field. + /// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed. + field_ptr_named, + /// Given a struct or object that contains virtual fields, returns the named field. + /// The field name is a comptime instruction. Used by @field. + /// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed. + field_val_named, + /// Returns a function type, or a function instance, depending on whether + /// the body_len is 0. Calling convention is auto. + /// Uses the `pl_node` union field. `payload_index` points to a `Func`. + func, + /// Same as `func` but has an inferred error set. + func_inferred, + /// Represents a function declaration or function prototype, depending on + /// whether body_len is 0. + /// Uses the `pl_node` union field. `payload_index` points to a `FuncFancy`. + func_fancy, + /// Implements the `@import` builtin. + /// Uses the `str_tok` field. + import, + /// Integer literal that fits in a u64. Uses the `int` union field. + int, + /// Arbitrary sized integer literal. Uses the `str` union field. 
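+        // Illustrative sketch of `field_call_bind`: a call written as
+        // `list.append(item)` lowers to something along the lines of
+        //
+        //     %1 = field_call_bind(%list_ptr, "append")
+        //     %2 = call(%1, [%item])
+        //
+        // The bound-function value produced by %1 may only be consumed by a `call` or
+        // `builtin_call`, as noted above. Hypothetical rendering; names and operand
+        // encoding are simplified.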
+ int_big, + /// A float literal that fits in a f64. Uses the float union value. + float, + /// A float literal that fits in a f128. Uses the `pl_node` union value. + /// Payload is `Float128`. + float128, + /// Make an integer type out of signedness and bit count. + /// Payload is `int_type` + int_type, + /// Return a boolean false if an optional is null. `x != null` + /// Uses the `un_node` field. + is_non_null, + /// Return a boolean false if an optional is null. `x.* != null` + /// Uses the `un_node` field. + is_non_null_ptr, + /// Return a boolean false if value is an error + /// Uses the `un_node` field. + is_non_err, + /// Return a boolean false if dereferenced pointer is an error + /// Uses the `un_node` field. + is_non_err_ptr, + /// Same as `is_non_er` but doesn't validate that the type can be an error. + /// Uses the `un_node` field. + ret_is_non_err, + /// A labeled block of code that loops forever. At the end of the body will have either + /// a `repeat` instruction or a `repeat_inline` instruction. + /// Uses the `pl_node` field. The AST node is either a for loop or while loop. + /// This ZIR instruction is needed because AIR does not (yet?) match ZIR, and Sema + /// needs to emit more than 1 AIR block for this instruction. + /// The payload is `Block`. + loop, + /// Sends runtime control flow back to the beginning of the current block. + /// Uses the `node` field. + repeat, + /// Sends comptime control flow back to the beginning of the current block. + /// Uses the `node` field. + repeat_inline, + /// Asserts that all the lengths provided match. Used to build a for loop. + /// Return value is the length as a usize. + /// Uses the `pl_node` field with payload `MultiOp`. + /// There is exactly one item corresponding to each AST node inside the for + /// loop condition. Any item may be `none`, indicating an unbounded range. + /// Illegal behaviors: + /// * If all lengths are unbounded ranges (always a compile error). + /// * If any two lengths do not match each other. + for_len, + /// Merge two error sets into one, `E1 || E2`. + /// Uses the `pl_node` field with payload `Bin`. + merge_error_sets, + /// Turns an R-Value into a const L-Value. In other words, it takes a value, + /// stores it in a memory location, and returns a const pointer to it. If the value + /// is `comptime`, the memory location is global static constant data. Otherwise, + /// the memory location is in the stack frame, local to the scope containing the + /// instruction. + /// Uses the `un_tok` union field. + ref, + /// Sends control flow back to the function's callee. + /// Includes an operand as the return value. + /// Includes an AST node source location. + /// Uses the `un_node` union field. + ret_node, + /// Sends control flow back to the function's callee. + /// The operand is a `ret_ptr` instruction, where the return value can be found. + /// Includes an AST node source location. + /// Uses the `un_node` union field. + ret_load, + /// Sends control flow back to the function's callee. + /// Includes an operand as the return value. + /// Includes a token source location. + /// Uses the `un_tok` union field. + ret_implicit, + /// Sends control flow back to the function's callee. + /// The return operand is `error.foo` where `foo` is given by the string. + /// If the current function has an inferred error set, the error given by the + /// name is added to it. + /// Uses the `str_tok` union field. + ret_err_value, + /// A string name is provided which is an anonymous error set value. 
+ /// If the current function has an inferred error set, the error given by the + /// name is added to it. + /// Results in the error code. Note that control flow is not diverted with + /// this instruction; a following 'ret' instruction will do the diversion. + /// Uses the `str_tok` union field. + ret_err_value_code, + /// Obtains a pointer to the return value. + /// Uses the `node` union field. + ret_ptr, + /// Obtains the return type of the in-scope function. + /// Uses the `node` union field. + ret_type, + /// Create a pointer type which can have a sentinel, alignment, address space, and/or bit range. + /// Uses the `ptr_type` union field. + ptr_type, + /// Slice operation `lhs[rhs..]`. No sentinel and no end offset. + /// Returns a pointer to the subslice. + /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceStart`. + slice_start, + /// Slice operation `array_ptr[start..end]`. No sentinel. + /// Returns a pointer to the subslice. + /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceEnd`. + slice_end, + /// Slice operation `array_ptr[start..end:sentinel]`. + /// Returns a pointer to the subslice. + /// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceSentinel`. + slice_sentinel, + /// Write a value to a pointer. For loading, see `load`. + /// Source location is assumed to be same as previous instruction. + /// Uses the `bin` union field. + store, + /// Same as `store` except provides a source location. + /// Uses the `pl_node` union field. Payload is `Bin`. + store_node, + /// This instruction is not really supposed to be emitted from AstGen; nevertheless it + /// is sometimes emitted due to deficiencies in AstGen. When Sema sees this instruction, + /// it must clean up after AstGen's mess by looking at various context clues and + /// then treating it as one of the following: + /// * no-op + /// * store_to_inferred_ptr + /// * store + /// Uses the `bin` union field with LHS as the pointer to store to. + store_to_block_ptr, + /// Same as `store` but the type of the value being stored will be used to infer + /// the pointer type. + /// Uses the `bin` union field - Astgen.zig depends on the ability to change + /// the tag of an instruction from `store_to_block_ptr` to `store_to_inferred_ptr` + /// without changing the data. + store_to_inferred_ptr, + /// String Literal. Makes an anonymous Decl and then takes a pointer to it. + /// Uses the `str` union field. + str, + /// Arithmetic negation. Asserts no integer overflow. + /// Same as sub with a lhs of 0, split into a separate instruction to save memory. + /// Uses `un_node`. + negate, + /// Twos complement wrapping integer negation. + /// Same as subwrap with a lhs of 0, split into a separate instruction to save memory. + /// Uses `un_node`. + negate_wrap, + /// Returns the type of a value. + /// Uses the `un_node` field. + typeof, + /// Implements `@TypeOf` for one operand. + /// Uses the `pl_node` field. + typeof_builtin, + /// Given a value, look at the type of it, which must be an integer type. + /// Returns the integer type for the RHS of a shift operation. + /// Uses the `un_node` field. + typeof_log2_int_type, + /// Asserts control-flow will not reach this instruction (`unreachable`). + /// Uses the `@"unreachable"` union field. + @"unreachable", + /// Bitwise XOR. `^` + /// Uses the `pl_node` union field. Payload is `Bin`. + xor, + /// Create an optional type '?T' + /// Uses the `un_node` field. + optional_type, + /// ?T => T with safety. 
+ /// Given an optional value, returns the payload value, with a safety check that + /// the value is non-null. Used for `orelse`, `if` and `while`. + /// Uses the `un_node` field. + optional_payload_safe, + /// ?T => T without safety. + /// Given an optional value, returns the payload value. No safety checks. + /// Uses the `un_node` field. + optional_payload_unsafe, + /// *?T => *T with safety. + /// Given a pointer to an optional value, returns a pointer to the payload value, + /// with a safety check that the value is non-null. Used for `orelse`, `if` and `while`. + /// Uses the `un_node` field. + optional_payload_safe_ptr, + /// *?T => *T without safety. + /// Given a pointer to an optional value, returns a pointer to the payload value. + /// No safety checks. + /// Uses the `un_node` field. + optional_payload_unsafe_ptr, + /// E!T => T without safety. + /// Given an error union value, returns the payload value. No safety checks. + /// Uses the `un_node` field. + err_union_payload_unsafe, + /// *E!T => *T without safety. + /// Given a pointer to a error union value, returns a pointer to the payload value. + /// No safety checks. + /// Uses the `un_node` field. + err_union_payload_unsafe_ptr, + /// E!T => E without safety. + /// Given an error union value, returns the error code. No safety checks. + /// Uses the `un_node` field. + err_union_code, + /// *E!T => E without safety. + /// Given a pointer to an error union value, returns the error code. No safety checks. + /// Uses the `un_node` field. + err_union_code_ptr, + /// An enum literal. Uses the `str_tok` union field. + enum_literal, + /// A switch expression. Uses the `pl_node` union field. + /// AST node is the switch, payload is `SwitchBlock`. + switch_block, + /// Produces the value that will be switched on. For example, for + /// integers, it returns the integer with no modifications. For tagged unions, it + /// returns the active enum tag. + /// Uses the `un_node` union field. + switch_cond, + /// Same as `switch_cond`, except the input operand is a pointer to + /// what will be switched on. + /// Uses the `un_node` union field. + switch_cond_ref, + /// Produces the capture value for a switch prong. + /// Uses the `switch_capture` field. + /// If the `prong_index` field is max int, it means this is the capture + /// for the else/`_` prong. + switch_capture, + /// Produces the capture value for a switch prong. + /// Result is a pointer to the value. + /// Uses the `switch_capture` field. + /// If the `prong_index` field is max int, it means this is the capture + /// for the else/`_` prong. + switch_capture_ref, + /// Produces the capture value for a switch prong. + /// The prong is one of the multi cases. + /// Uses the `switch_capture` field. + switch_capture_multi, + /// Produces the capture value for a switch prong. + /// The prong is one of the multi cases. + /// Result is a pointer to the value. + /// Uses the `switch_capture` field. + switch_capture_multi_ref, + /// Produces the capture value for an inline switch prong tag capture. + /// Uses the `un_tok` field. + switch_capture_tag, + /// Given a + /// *A returns *A + /// *E!A returns *A + /// *?A returns *A + /// Uses the `un_node` field. + array_base_ptr, + /// Given a + /// *S returns *S + /// *E!S returns *S + /// *?S returns *S + /// Uses the `un_node` field. + field_base_ptr, + /// Checks that the type supports array init syntax. + /// Uses the `un_node` field. + validate_array_init_ty, + /// Checks that the type supports struct init syntax. 
+ /// Uses the `un_node` field. + validate_struct_init_ty, + /// Given a set of `field_ptr` instructions, assumes they are all part of a struct + /// initialization expression, and emits compile errors for duplicate fields + /// as well as missing fields, if applicable. + /// This instruction asserts that there is at least one field_ptr instruction, + /// because it must use one of them to find out the struct type. + /// Uses the `pl_node` field. Payload is `Block`. + validate_struct_init, + /// Same as `validate_struct_init` but additionally communicates that the + /// resulting struct initialization value is within a comptime scope. + validate_struct_init_comptime, + /// Given a set of `elem_ptr_imm` instructions, assumes they are all part of an + /// array initialization expression, and emits a compile error if the number of + /// elements does not match the array type. + /// This instruction asserts that there is at least one `elem_ptr_imm` instruction, + /// because it must use one of them to find out the array type. + /// Uses the `pl_node` field. Payload is `Block`. + validate_array_init, + /// Same as `validate_array_init` but additionally communicates that the + /// resulting array initialization value is within a comptime scope. + validate_array_init_comptime, + /// Check that operand type supports the dereference operand (.*). + /// Uses the `un_node` field. + validate_deref, + /// A struct literal with a specified type, with no fields. + /// Uses the `un_node` field. + struct_init_empty, + /// Given a struct or union, and a field name as a string index, + /// returns the field type. Uses the `pl_node` field. Payload is `FieldType`. + field_type, + /// Given a struct or union, and a field name as a Ref, + /// returns the field type. Uses the `pl_node` field. Payload is `FieldTypeRef`. + field_type_ref, + /// Finalizes a typed struct or union initialization, performs validation, and returns the + /// struct or union value. + /// Uses the `pl_node` field. Payload is `StructInit`. + struct_init, + /// Struct initialization syntax, make the result a pointer. + /// Uses the `pl_node` field. Payload is `StructInit`. + struct_init_ref, + /// Struct initialization without a type. + /// Uses the `pl_node` field. Payload is `StructInitAnon`. + struct_init_anon, + /// Anonymous struct initialization syntax, make the result a pointer. + /// Uses the `pl_node` field. Payload is `StructInitAnon`. + struct_init_anon_ref, + /// Array initialization syntax. + /// Uses the `pl_node` field. Payload is `MultiOp`. + array_init, + /// Anonymous array initialization syntax. + /// Uses the `pl_node` field. Payload is `MultiOp`. + array_init_anon, + /// Array initialization syntax, make the result a pointer. + /// Uses the `pl_node` field. Payload is `MultiOp`. + array_init_ref, + /// Anonymous array initialization syntax, make the result a pointer. + /// Uses the `pl_node` field. Payload is `MultiOp`. + array_init_anon_ref, + /// Implements the `@unionInit` builtin. + /// Uses the `pl_node` field. Payload is `UnionInit`. + union_init, + /// Implements the `@typeInfo` builtin. Uses `un_node`. + type_info, + /// Implements the `@sizeOf` builtin. Uses `un_node`. + size_of, + /// Implements the `@bitSizeOf` builtin. Uses `un_node`. + bit_size_of, + + /// Implement builtin `@ptrToInt`. Uses `un_node`. + /// Convert a pointer to a `usize` integer. + ptr_to_int, + /// Emit an error message and fail compilation. + /// Uses the `un_node` field. 
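+        // Illustrative sketch of struct-init validation: with a result pointer,
+        // `Point{ .x = 1, .y = 2 }` lowers to something along the lines of
+        //
+        //     %1 = field_ptr(%result_ptr, "x"); store(%1, <1>)
+        //     %2 = field_ptr(%result_ptr, "y"); store(%2, <2>)
+        //     validate_struct_init({ %1, %2 })
+        //
+        // so Sema can report duplicate or missing fields from the listed `field_ptr`s.
+        // Hypothetical rendering; the exact sequence depends on the result-location
+        // strategy AstGen picks.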
+ compile_error, + /// Changes the maximum number of backwards branches that compile-time + /// code execution can use before giving up and making a compile error. + /// Uses the `un_node` union field. + set_eval_branch_quota, + /// Converts an enum value into an integer. Resulting type will be the tag type + /// of the enum. Uses `un_node`. + enum_to_int, + /// Implement builtin `@alignOf`. Uses `un_node`. + align_of, + /// Implement builtin `@boolToInt`. Uses `un_node`. + bool_to_int, + /// Implement builtin `@embedFile`. Uses `un_node`. + embed_file, + /// Implement builtin `@errorName`. Uses `un_node`. + error_name, + /// Implement builtin `@panic`. Uses `un_node`. + panic, + /// Same as `panic` but forces comptime. + panic_comptime, + /// Implements `@trap`. + /// Uses the `node` field. + trap, + /// Implement builtin `@setRuntimeSafety`. Uses `un_node`. + set_runtime_safety, + /// Implement builtin `@sqrt`. Uses `un_node`. + sqrt, + /// Implement builtin `@sin`. Uses `un_node`. + sin, + /// Implement builtin `@cos`. Uses `un_node`. + cos, + /// Implement builtin `@tan`. Uses `un_node`. + tan, + /// Implement builtin `@exp`. Uses `un_node`. + exp, + /// Implement builtin `@exp2`. Uses `un_node`. + exp2, + /// Implement builtin `@log`. Uses `un_node`. + log, + /// Implement builtin `@log2`. Uses `un_node`. + log2, + /// Implement builtin `@log10`. Uses `un_node`. + log10, + /// Implement builtin `@fabs`. Uses `un_node`. + fabs, + /// Implement builtin `@floor`. Uses `un_node`. + floor, + /// Implement builtin `@ceil`. Uses `un_node`. + ceil, + /// Implement builtin `@trunc`. Uses `un_node`. + trunc, + /// Implement builtin `@round`. Uses `un_node`. + round, + /// Implement builtin `@tagName`. Uses `un_node`. + tag_name, + /// Implement builtin `@typeName`. Uses `un_node`. + type_name, + /// Implement builtin `@Frame`. Uses `un_node`. + frame_type, + /// Implement builtin `@frameSize`. Uses `un_node`. + frame_size, + + /// Implements the `@floatToInt` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + float_to_int, + /// Implements the `@intToFloat` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + int_to_float, + /// Implements the `@intToPtr` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + int_to_ptr, + /// Converts an integer into an enum value. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + int_to_enum, + /// Convert a larger float type to any other float type, possibly causing + /// a loss of precision. + /// Uses the `pl_node` field. AST is the `@floatCast` syntax. + /// Payload is `Bin` with lhs as the dest type, rhs the operand. + float_cast, + /// Implements the `@intCast` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + /// Convert an integer value to another integer type, asserting that the destination type + /// can hold the same mathematical value. + int_cast, + /// Implements the `@ptrCast` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + ptr_cast, + /// Implements the `@truncate` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. + truncate, + /// Implements the `@alignCast` builtin. + /// Uses `pl_node` with payload `Bin`. `lhs` is dest alignment, `rhs` is operand. + align_cast, + + /// Implements the `@hasDecl` builtin. + /// Uses the `pl_node` union field. Payload is `Bin`. 
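+        // For orientation: the cast-style tags above (`float_to_int` through
+        // `align_cast`) share one encoding, a `pl_node` whose `Bin` payload carries the
+        // destination type (or alignment, for `align_cast`) in `lhs` and the value in
+        // `rhs`. Using the two-argument builtin syntax these comments describe:
+        //
+        //     @intCast(u8, x)   =>   %n = int_cast(u8_type, %x)
+        //
+        // Hypothetical rendering, shown only to make the lhs/rhs convention concrete.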
+ has_decl, + /// Implements the `@hasField` builtin. + /// Uses the `pl_node` union field. Payload is `Bin`. + has_field, + + /// Implements the `@clz` builtin. Uses the `un_node` union field. + clz, + /// Implements the `@ctz` builtin. Uses the `un_node` union field. + ctz, + /// Implements the `@popCount` builtin. Uses the `un_node` union field. + pop_count, + /// Implements the `@byteSwap` builtin. Uses the `un_node` union field. + byte_swap, + /// Implements the `@bitReverse` builtin. Uses the `un_node` union field. + bit_reverse, + + /// Implements the `@bitOffsetOf` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + bit_offset_of, + /// Implements the `@offsetOf` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + offset_of, + /// Implements the `@splat` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + splat, + /// Implements the `@reduce` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + reduce, + /// Implements the `@shuffle` builtin. + /// Uses the `pl_node` union field with payload `Shuffle`. + shuffle, + /// Implements the `@atomicLoad` builtin. + /// Uses the `pl_node` union field with payload `AtomicLoad`. + atomic_load, + /// Implements the `@atomicRmw` builtin. + /// Uses the `pl_node` union field with payload `AtomicRmw`. + atomic_rmw, + /// Implements the `@atomicStore` builtin. + /// Uses the `pl_node` union field with payload `AtomicStore`. + atomic_store, + /// Implements the `@mulAdd` builtin. + /// Uses the `pl_node` union field with payload `MulAdd`. + /// The addend communicates the type of the builtin. + /// The mulends need to be coerced to the same type. + mul_add, + /// Implements the `@fieldParentPtr` builtin. + /// Uses the `pl_node` union field with payload `FieldParentPtr`. + field_parent_ptr, + /// Implements the `@memcpy` builtin. + /// Uses the `pl_node` union field with payload `Memcpy`. + memcpy, + /// Implements the `@memset` builtin. + /// Uses the `pl_node` union field with payload `Memset`. + memset, + /// Implements the `@min` builtin. + /// Uses the `pl_node` union field with payload `Bin` + min, + /// Implements the `@max` builtin. + /// Uses the `pl_node` union field with payload `Bin` + max, + /// Implements the `@cImport` builtin. + /// Uses the `pl_node` union field with payload `Block`. + c_import, + + /// Allocates stack local memory. + /// Uses the `un_node` union field. The operand is the type of the allocated object. + /// The node source location points to a var decl node. + alloc, + /// Same as `alloc` except mutable. + alloc_mut, + /// Allocates comptime-mutable memory. + /// Uses the `un_node` union field. The operand is the type of the allocated object. + /// The node source location points to a var decl node. + alloc_comptime_mut, + /// Same as `alloc` except the type is inferred. + /// Uses the `node` union field. + alloc_inferred, + /// Same as `alloc_inferred` except mutable. + alloc_inferred_mut, + /// Allocates comptime const memory. + /// Uses the `node` union field. The type of the allocated object is inferred. + /// The node source location points to a var decl node. + alloc_inferred_comptime, + /// Same as `alloc_comptime_mut` except the type is inferred. + alloc_inferred_comptime_mut, + /// Each `store_to_inferred_ptr` puts the type of the stored value into a set, + /// and then `resolve_inferred_alloc` triggers peer type resolution on the set. 
+ /// The operand is a `alloc_inferred` or `alloc_inferred_mut` instruction, which + /// is the allocation that needs to have its type inferred. + /// Uses the `un_node` field. The AST node is the var decl. + resolve_inferred_alloc, + /// Turns a pointer coming from an `alloc`, `alloc_inferred`, `alloc_inferred_comptime` or + /// `Extended.alloc` into a constant version of the same pointer. + /// Uses the `un_node` union field. + make_ptr_const, + + /// Implements `resume` syntax. Uses `un_node` field. + @"resume", + @"await", + + /// When a type or function refers to a comptime value from an outer + /// scope, that forms a closure over comptime value. The outer scope + /// will record a capture of that value, which encodes its current state + /// and marks it to persist. Uses `un_tok` field. Operand is the + /// instruction value to capture. + closure_capture, + /// The inner scope of a closure uses closure_get to retrieve the value + /// stored by the outer scope. Uses `inst_node` field. Operand is the + /// closure_capture instruction ref. + closure_get, + + /// A defer statement. + /// Uses the `defer` union field. + @"defer", + /// An errdefer statement with a code. + /// Uses the `err_defer_code` union field. + defer_err_code, + + /// Requests that Sema update the saved error return trace index for the enclosing + /// block, if the operand is .none or of an error/error-union type. + /// Uses the `save_err_ret_index` field. + save_err_ret_index, + /// Sets error return trace to zero if no operand is given, + /// otherwise sets the value to the given amount. + /// Uses the `restore_err_ret_index` union field. + restore_err_ret_index, + + /// The ZIR instruction tag is one of the `Extended` ones. + /// Uses the `extended` union field. + extended, + + /// Returns whether the instruction is one of the control flow "noreturn" types. + /// Function calls do not count. 
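+        // A rough sketch of how this predicate is typically consulted (hypothetical
+        // call site): after lowering a block body, the lowering code can check whether
+        // the last emitted instruction already terminates the block, e.g.
+        //
+        //     const tags = astgen.instructions.items(.tag);
+        //     if (!tags[last_inst].isNoReturn()) {
+        //         // append an implicit break / unreachable
+        //     }
+        //
+        // `astgen` and `last_inst` are assumed names; the real call sites (for example
+        // in AstGen.zig) may look different.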
+ pub fn isNoReturn(tag: Tag) bool { + return switch (tag) { + .param, + .param_comptime, + .param_anytype, + .param_anytype_comptime, + .add, + .addwrap, + .add_sat, + .add_unsafe, + .alloc, + .alloc_mut, + .alloc_comptime_mut, + .alloc_inferred, + .alloc_inferred_mut, + .alloc_inferred_comptime, + .alloc_inferred_comptime_mut, + .make_ptr_const, + .array_cat, + .array_mul, + .array_type, + .array_type_sentinel, + .vector_type, + .elem_type_index, + .indexable_ptr_len, + .anyframe_type, + .as, + .as_node, + .as_shift_operand, + .bit_and, + .bitcast, + .bit_or, + .block, + .block_inline, + .suspend_block, + .loop, + .bool_br_and, + .bool_br_or, + .bool_not, + .call, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .coerce_result_ptr, + .error_set_decl, + .error_set_decl_anon, + .error_set_decl_func, + .dbg_stmt, + .dbg_var_ptr, + .dbg_var_val, + .dbg_block_begin, + .dbg_block_end, + .decl_ref, + .decl_val, + .load, + .div, + .elem_ptr, + .elem_val, + .elem_ptr_node, + .elem_ptr_imm, + .elem_val_node, + .ensure_result_used, + .ensure_result_non_error, + .ensure_err_union_payload_void, + .@"export", + .export_value, + .field_ptr, + .field_ptr_init, + .field_val, + .field_call_bind, + .field_ptr_named, + .field_val_named, + .func, + .func_inferred, + .func_fancy, + .has_decl, + .int, + .int_big, + .float, + .float128, + .int_type, + .is_non_null, + .is_non_null_ptr, + .is_non_err, + .is_non_err_ptr, + .ret_is_non_err, + .mod_rem, + .mul, + .mulwrap, + .mul_sat, + .ref, + .shl, + .shl_sat, + .shr, + .store, + .store_node, + .store_to_block_ptr, + .store_to_inferred_ptr, + .str, + .sub, + .subwrap, + .sub_sat, + .negate, + .negate_wrap, + .typeof, + .typeof_builtin, + .xor, + .optional_type, + .optional_payload_safe, + .optional_payload_unsafe, + .optional_payload_safe_ptr, + .optional_payload_unsafe_ptr, + .err_union_payload_unsafe, + .err_union_payload_unsafe_ptr, + .err_union_code, + .err_union_code_ptr, + .ptr_type, + .enum_literal, + .merge_error_sets, + .error_union_type, + .bit_not, + .error_value, + .slice_start, + .slice_end, + .slice_sentinel, + .import, + .typeof_log2_int_type, + .resolve_inferred_alloc, + .set_eval_branch_quota, + .switch_capture, + .switch_capture_ref, + .switch_capture_multi, + .switch_capture_multi_ref, + .switch_capture_tag, + .switch_block, + .switch_cond, + .switch_cond_ref, + .array_base_ptr, + .field_base_ptr, + .validate_array_init_ty, + .validate_struct_init_ty, + .validate_struct_init, + .validate_struct_init_comptime, + .validate_array_init, + .validate_array_init_comptime, + .validate_deref, + .struct_init_empty, + .struct_init, + .struct_init_ref, + .struct_init_anon, + .struct_init_anon_ref, + .array_init, + .array_init_anon, + .array_init_ref, + .array_init_anon_ref, + .union_init, + .field_type, + .field_type_ref, + .int_to_enum, + .enum_to_int, + .type_info, + .size_of, + .bit_size_of, + .ptr_to_int, + .align_of, + .bool_to_int, + .embed_file, + .error_name, + .set_runtime_safety, + .sqrt, + .sin, + .cos, + .tan, + .exp, + .exp2, + .log, + .log2, + .log10, + .fabs, + .floor, + .ceil, + .trunc, + .round, + .tag_name, + .type_name, + .frame_type, + .frame_size, + .float_to_int, + .int_to_float, + .int_to_ptr, + .float_cast, + .int_cast, + .ptr_cast, + .truncate, + .align_cast, + .has_field, + .clz, + .ctz, + .pop_count, + .byte_swap, + .bit_reverse, + .div_exact, + .div_floor, + .div_trunc, + .mod, + .rem, + .shl_exact, + .shr_exact, + .bit_offset_of, + .offset_of, + .splat, + .reduce, + .shuffle, + .atomic_load, + .atomic_rmw, 
+ .atomic_store, + .mul_add, + .builtin_call, + .field_parent_ptr, + .max, + .memcpy, + .memset, + .min, + .c_import, + .@"resume", + .@"await", + .ret_err_value_code, + .extended, + .closure_get, + .closure_capture, + .ret_ptr, + .ret_type, + .@"try", + .try_ptr, + //.try_inline, + //.try_ptr_inline, + .@"defer", + .defer_err_code, + .save_err_ret_index, + .restore_err_ret_index, + .for_len, + => false, + + .@"break", + .break_inline, + .condbr, + .condbr_inline, + .compile_error, + .ret_node, + .ret_load, + .ret_implicit, + .ret_err_value, + .@"unreachable", + .repeat, + .repeat_inline, + .panic, + .panic_comptime, + .trap, + .check_comptime_control_flow, + => true, + }; + } + + pub fn isParam(tag: Tag) bool { + return switch (tag) { + .param, + .param_comptime, + .param_anytype, + .param_anytype_comptime, + => true, + + else => false, + }; + } + + /// AstGen uses this to find out if `Ref.void_value` should be used in place + /// of the result of a given instruction. This allows Sema to forego adding + /// the instruction to the map after analysis. + pub fn isAlwaysVoid(tag: Tag, data: Data) bool { + return switch (tag) { + .dbg_stmt, + .dbg_var_ptr, + .dbg_var_val, + .dbg_block_begin, + .dbg_block_end, + .ensure_result_used, + .ensure_result_non_error, + .ensure_err_union_payload_void, + .set_eval_branch_quota, + .atomic_store, + .store, + .store_node, + .store_to_block_ptr, + .store_to_inferred_ptr, + .resolve_inferred_alloc, + .validate_array_init_ty, + .validate_struct_init_ty, + .validate_struct_init, + .validate_struct_init_comptime, + .validate_array_init, + .validate_array_init_comptime, + .validate_deref, + .@"export", + .export_value, + .set_runtime_safety, + .memcpy, + .memset, + .check_comptime_control_flow, + .@"defer", + .defer_err_code, + .restore_err_ret_index, + .save_err_ret_index, + => true, + + .param, + .param_comptime, + .param_anytype, + .param_anytype_comptime, + .add, + .addwrap, + .add_sat, + .add_unsafe, + .alloc, + .alloc_mut, + .alloc_comptime_mut, + .alloc_inferred, + .alloc_inferred_mut, + .alloc_inferred_comptime, + .alloc_inferred_comptime_mut, + .make_ptr_const, + .array_cat, + .array_mul, + .array_type, + .array_type_sentinel, + .vector_type, + .elem_type_index, + .indexable_ptr_len, + .anyframe_type, + .as, + .as_node, + .as_shift_operand, + .bit_and, + .bitcast, + .bit_or, + .block, + .block_inline, + .suspend_block, + .loop, + .bool_br_and, + .bool_br_or, + .bool_not, + .call, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .coerce_result_ptr, + .error_set_decl, + .error_set_decl_anon, + .error_set_decl_func, + .decl_ref, + .decl_val, + .load, + .div, + .elem_ptr, + .elem_val, + .elem_ptr_node, + .elem_ptr_imm, + .elem_val_node, + .field_ptr, + .field_ptr_init, + .field_val, + .field_call_bind, + .field_ptr_named, + .field_val_named, + .func, + .func_inferred, + .func_fancy, + .has_decl, + .int, + .int_big, + .float, + .float128, + .int_type, + .is_non_null, + .is_non_null_ptr, + .is_non_err, + .is_non_err_ptr, + .ret_is_non_err, + .mod_rem, + .mul, + .mulwrap, + .mul_sat, + .ref, + .shl, + .shl_sat, + .shr, + .str, + .sub, + .subwrap, + .sub_sat, + .negate, + .negate_wrap, + .typeof, + .typeof_builtin, + .xor, + .optional_type, + .optional_payload_safe, + .optional_payload_unsafe, + .optional_payload_safe_ptr, + .optional_payload_unsafe_ptr, + .err_union_payload_unsafe, + .err_union_payload_unsafe_ptr, + .err_union_code, + .err_union_code_ptr, + .ptr_type, + .enum_literal, + .merge_error_sets, + .error_union_type, + .bit_not, 
+ .error_value, + .slice_start, + .slice_end, + .slice_sentinel, + .import, + .typeof_log2_int_type, + .switch_capture, + .switch_capture_ref, + .switch_capture_multi, + .switch_capture_multi_ref, + .switch_capture_tag, + .switch_block, + .switch_cond, + .switch_cond_ref, + .array_base_ptr, + .field_base_ptr, + .struct_init_empty, + .struct_init, + .struct_init_ref, + .struct_init_anon, + .struct_init_anon_ref, + .array_init, + .array_init_anon, + .array_init_ref, + .array_init_anon_ref, + .union_init, + .field_type, + .field_type_ref, + .int_to_enum, + .enum_to_int, + .type_info, + .size_of, + .bit_size_of, + .ptr_to_int, + .align_of, + .bool_to_int, + .embed_file, + .error_name, + .sqrt, + .sin, + .cos, + .tan, + .exp, + .exp2, + .log, + .log2, + .log10, + .fabs, + .floor, + .ceil, + .trunc, + .round, + .tag_name, + .type_name, + .frame_type, + .frame_size, + .float_to_int, + .int_to_float, + .int_to_ptr, + .float_cast, + .int_cast, + .ptr_cast, + .truncate, + .align_cast, + .has_field, + .clz, + .ctz, + .pop_count, + .byte_swap, + .bit_reverse, + .div_exact, + .div_floor, + .div_trunc, + .mod, + .rem, + .shl_exact, + .shr_exact, + .bit_offset_of, + .offset_of, + .splat, + .reduce, + .shuffle, + .atomic_load, + .atomic_rmw, + .mul_add, + .builtin_call, + .field_parent_ptr, + .max, + .min, + .c_import, + .@"resume", + .@"await", + .ret_err_value_code, + .closure_get, + .closure_capture, + .@"break", + .break_inline, + .condbr, + .condbr_inline, + .compile_error, + .ret_node, + .ret_load, + .ret_implicit, + .ret_err_value, + .ret_ptr, + .ret_type, + .@"unreachable", + .repeat, + .repeat_inline, + .panic, + .panic_comptime, + .trap, + .for_len, + .@"try", + .try_ptr, + //.try_inline, + //.try_ptr_inline, + => false, + + .extended => switch (data.extended.opcode) { + .fence, .set_cold, .breakpoint => true, + else => false, + }, + }; + } + + /// Used by debug safety-checking code. 
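+        // Illustrative use of the table below: because `data_tags` maps every `Tag` to
+        // the `Data` field it uses, debug code can assert that an access matches the
+        // tag before reading the untagged union, along the lines of
+        //
+        //     assert(Tag.data_tags[@enumToInt(tag)] == .pl_node);
+        //     const pl = datas[inst].pl_node;
+        //
+        // Hypothetical assertion, shown only to illustrate what "debug safety-checking
+        // code" refers to.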
+ pub const data_tags = list: { + @setEvalBranchQuota(2000); + break :list std.enums.directEnumArray(Tag, Data.FieldEnum, 0, .{ + .add = .pl_node, + .addwrap = .pl_node, + .add_sat = .pl_node, + .add_unsafe = .pl_node, + .sub = .pl_node, + .subwrap = .pl_node, + .sub_sat = .pl_node, + .mul = .pl_node, + .mulwrap = .pl_node, + .mul_sat = .pl_node, + + .param = .pl_tok, + .param_comptime = .pl_tok, + .param_anytype = .str_tok, + .param_anytype_comptime = .str_tok, + .array_cat = .pl_node, + .array_mul = .pl_node, + .array_type = .pl_node, + .array_type_sentinel = .pl_node, + .vector_type = .pl_node, + .elem_type_index = .bin, + .indexable_ptr_len = .un_node, + .anyframe_type = .un_node, + .as = .bin, + .as_node = .pl_node, + .as_shift_operand = .pl_node, + .bit_and = .pl_node, + .bitcast = .pl_node, + .bit_not = .un_node, + .bit_or = .pl_node, + .block = .pl_node, + .block_inline = .pl_node, + .suspend_block = .pl_node, + .bool_not = .un_node, + .bool_br_and = .bool_br, + .bool_br_or = .bool_br, + .@"break" = .@"break", + .break_inline = .@"break", + .check_comptime_control_flow = .un_node, + .for_len = .pl_node, + .call = .pl_node, + .cmp_lt = .pl_node, + .cmp_lte = .pl_node, + .cmp_eq = .pl_node, + .cmp_gte = .pl_node, + .cmp_gt = .pl_node, + .cmp_neq = .pl_node, + .coerce_result_ptr = .pl_node, + .condbr = .pl_node, + .condbr_inline = .pl_node, + .@"try" = .pl_node, + .try_ptr = .pl_node, + //.try_inline = .pl_node, + //.try_ptr_inline = .pl_node, + .error_set_decl = .pl_node, + .error_set_decl_anon = .pl_node, + .error_set_decl_func = .pl_node, + .dbg_stmt = .dbg_stmt, + .dbg_var_ptr = .str_op, + .dbg_var_val = .str_op, + .dbg_block_begin = .tok, + .dbg_block_end = .tok, + .decl_ref = .str_tok, + .decl_val = .str_tok, + .load = .un_node, + .div = .pl_node, + .elem_ptr = .pl_node, + .elem_ptr_node = .pl_node, + .elem_ptr_imm = .pl_node, + .elem_val = .pl_node, + .elem_val_node = .pl_node, + .ensure_result_used = .un_node, + .ensure_result_non_error = .un_node, + .ensure_err_union_payload_void = .un_node, + .error_union_type = .pl_node, + .error_value = .str_tok, + .@"export" = .pl_node, + .export_value = .pl_node, + .field_ptr = .pl_node, + .field_ptr_init = .pl_node, + .field_val = .pl_node, + .field_ptr_named = .pl_node, + .field_val_named = .pl_node, + .field_call_bind = .pl_node, + .func = .pl_node, + .func_inferred = .pl_node, + .func_fancy = .pl_node, + .import = .str_tok, + .int = .int, + .int_big = .str, + .float = .float, + .float128 = .pl_node, + .int_type = .int_type, + .is_non_null = .un_node, + .is_non_null_ptr = .un_node, + .is_non_err = .un_node, + .is_non_err_ptr = .un_node, + .ret_is_non_err = .un_node, + .loop = .pl_node, + .repeat = .node, + .repeat_inline = .node, + .merge_error_sets = .pl_node, + .mod_rem = .pl_node, + .ref = .un_tok, + .ret_node = .un_node, + .ret_load = .un_node, + .ret_implicit = .un_tok, + .ret_err_value = .str_tok, + .ret_err_value_code = .str_tok, + .ret_ptr = .node, + .ret_type = .node, + .ptr_type = .ptr_type, + .slice_start = .pl_node, + .slice_end = .pl_node, + .slice_sentinel = .pl_node, + .store = .bin, + .store_node = .pl_node, + .store_to_block_ptr = .bin, + .store_to_inferred_ptr = .bin, + .str = .str, + .negate = .un_node, + .negate_wrap = .un_node, + .typeof = .un_node, + .typeof_log2_int_type = .un_node, + .@"unreachable" = .@"unreachable", + .xor = .pl_node, + .optional_type = .un_node, + .optional_payload_safe = .un_node, + .optional_payload_unsafe = .un_node, + .optional_payload_safe_ptr = .un_node, + .optional_payload_unsafe_ptr 
= .un_node, + .err_union_payload_unsafe = .un_node, + .err_union_payload_unsafe_ptr = .un_node, + .err_union_code = .un_node, + .err_union_code_ptr = .un_node, + .enum_literal = .str_tok, + .switch_block = .pl_node, + .switch_cond = .un_node, + .switch_cond_ref = .un_node, + .switch_capture = .switch_capture, + .switch_capture_ref = .switch_capture, + .switch_capture_multi = .switch_capture, + .switch_capture_multi_ref = .switch_capture, + .switch_capture_tag = .un_tok, + .array_base_ptr = .un_node, + .field_base_ptr = .un_node, + .validate_array_init_ty = .pl_node, + .validate_struct_init_ty = .un_node, + .validate_struct_init = .pl_node, + .validate_struct_init_comptime = .pl_node, + .validate_array_init = .pl_node, + .validate_array_init_comptime = .pl_node, + .validate_deref = .un_node, + .struct_init_empty = .un_node, + .field_type = .pl_node, + .field_type_ref = .pl_node, + .struct_init = .pl_node, + .struct_init_ref = .pl_node, + .struct_init_anon = .pl_node, + .struct_init_anon_ref = .pl_node, + .array_init = .pl_node, + .array_init_anon = .pl_node, + .array_init_ref = .pl_node, + .array_init_anon_ref = .pl_node, + .union_init = .pl_node, + .type_info = .un_node, + .size_of = .un_node, + .bit_size_of = .un_node, + + .ptr_to_int = .un_node, + .compile_error = .un_node, + .set_eval_branch_quota = .un_node, + .enum_to_int = .un_node, + .align_of = .un_node, + .bool_to_int = .un_node, + .embed_file = .un_node, + .error_name = .un_node, + .panic = .un_node, + .panic_comptime = .un_node, + .trap = .node, + .set_runtime_safety = .un_node, + .sqrt = .un_node, + .sin = .un_node, + .cos = .un_node, + .tan = .un_node, + .exp = .un_node, + .exp2 = .un_node, + .log = .un_node, + .log2 = .un_node, + .log10 = .un_node, + .fabs = .un_node, + .floor = .un_node, + .ceil = .un_node, + .trunc = .un_node, + .round = .un_node, + .tag_name = .un_node, + .type_name = .un_node, + .frame_type = .un_node, + .frame_size = .un_node, + + .float_to_int = .pl_node, + .int_to_float = .pl_node, + .int_to_ptr = .pl_node, + .int_to_enum = .pl_node, + .float_cast = .pl_node, + .int_cast = .pl_node, + .ptr_cast = .pl_node, + .truncate = .pl_node, + .align_cast = .pl_node, + .typeof_builtin = .pl_node, + + .has_decl = .pl_node, + .has_field = .pl_node, + + .clz = .un_node, + .ctz = .un_node, + .pop_count = .un_node, + .byte_swap = .un_node, + .bit_reverse = .un_node, + + .div_exact = .pl_node, + .div_floor = .pl_node, + .div_trunc = .pl_node, + .mod = .pl_node, + .rem = .pl_node, + + .shl = .pl_node, + .shl_exact = .pl_node, + .shl_sat = .pl_node, + .shr = .pl_node, + .shr_exact = .pl_node, + + .bit_offset_of = .pl_node, + .offset_of = .pl_node, + .splat = .pl_node, + .reduce = .pl_node, + .shuffle = .pl_node, + .atomic_load = .pl_node, + .atomic_rmw = .pl_node, + .atomic_store = .pl_node, + .mul_add = .pl_node, + .builtin_call = .pl_node, + .field_parent_ptr = .pl_node, + .max = .pl_node, + .memcpy = .pl_node, + .memset = .pl_node, + .min = .pl_node, + .c_import = .pl_node, + + .alloc = .un_node, + .alloc_mut = .un_node, + .alloc_comptime_mut = .un_node, + .alloc_inferred = .node, + .alloc_inferred_mut = .node, + .alloc_inferred_comptime = .node, + .alloc_inferred_comptime_mut = .node, + .resolve_inferred_alloc = .un_node, + .make_ptr_const = .un_node, + + .@"resume" = .un_node, + .@"await" = .un_node, + + .closure_capture = .un_tok, + .closure_get = .inst_node, + + .@"defer" = .@"defer", + .defer_err_code = .defer_err_code, + + .save_err_ret_index = .save_err_ret_index, + .restore_err_ret_index = 
.restore_err_ret_index, + + .extended = .extended, + }); + }; + + // Uncomment to view how many tag slots are available. + //comptime { + // @compileLog("ZIR tags left: ", 256 - @typeInfo(Tag).Enum.fields.len); + //} + }; + + /// Rarer instructions are here; ones that do not fit in the 8-bit `Tag` enum. + /// `noreturn` instructions may not go here; they must be part of the main `Tag` enum. + pub const Extended = enum(u16) { + /// Declares a global variable. + /// `operand` is payload index to `ExtendedVar`. + /// `small` is `ExtendedVar.Small`. + variable, + /// A struct type definition. Contains references to ZIR instructions for + /// the field types, defaults, and alignments. + /// `operand` is payload index to `StructDecl`. + /// `small` is `StructDecl.Small`. + struct_decl, + /// An enum type definition. Contains references to ZIR instructions for + /// the field value expressions and optional type tag expression. + /// `operand` is payload index to `EnumDecl`. + /// `small` is `EnumDecl.Small`. + enum_decl, + /// A union type definition. Contains references to ZIR instructions for + /// the field types and optional type tag expression. + /// `operand` is payload index to `UnionDecl`. + /// `small` is `UnionDecl.Small`. + union_decl, + /// An opaque type definition. Contains references to decls and captures. + /// `operand` is payload index to `OpaqueDecl`. + /// `small` is `OpaqueDecl.Small`. + opaque_decl, + /// Implements the `@This` builtin. + /// `operand` is `src_node: i32`. + this, + /// Implements the `@returnAddress` builtin. + /// `operand` is `src_node: i32`. + ret_addr, + /// Implements the `@src` builtin. + /// `operand` is payload index to `LineColumn`. + builtin_src, + /// Implements the `@errorReturnTrace` builtin. + /// `operand` is `src_node: i32`. + error_return_trace, + /// Implements the `@frame` builtin. + /// `operand` is `src_node: i32`. + frame, + /// Implements the `@frameAddress` builtin. + /// `operand` is `src_node: i32`. + frame_address, + /// Same as `alloc` from `Tag` but may contain an alignment instruction. + /// `operand` is payload index to `AllocExtended`. + /// `small`: + /// * 0b000X - has type + /// * 0b00X0 - has alignment + /// * 0b0X00 - 1=const, 0=var + /// * 0bX000 - is comptime + alloc, + /// The `@extern` builtin. + /// `operand` is payload index to `BinNode`. + builtin_extern, + /// Inline assembly. + /// `small`: + /// * 0b00000000_000XXXXX - `outputs_len`. + /// * 0b000000XX_XXX00000 - `inputs_len`. + /// * 0b0XXXXX00_00000000 - `clobbers_len`. + /// * 0bX0000000_00000000 - is volatile + /// `operand` is payload index to `Asm`. + @"asm", + /// Same as `asm` except the assembly template is not a string literal but a comptime + /// expression. + /// The `asm_source` field of the Asm is not a null-terminated string + /// but instead a Ref. + asm_expr, + /// Log compile time variables and emit an error message. + /// `operand` is payload index to `NodeMultiOp`. + /// `small` is `operands_len`. + /// The AST node is the compile log builtin call. + compile_log, + /// The builtin `@TypeOf` which returns the type after Peer Type Resolution + /// of one or more params. + /// `operand` is payload index to `NodeMultiOp`. + /// `small` is `operands_len`. + /// The AST node is the builtin call. + typeof_peer, + /// Implements the `@addWithOverflow` builtin. + /// `operand` is payload index to `BinNode`. + /// `small` is unused. + add_with_overflow, + /// Implements the `@subWithOverflow` builtin. + /// `operand` is payload index to `BinNode`. 
+        /// `small` is unused.
+        sub_with_overflow,
+        /// Implements the `@mulWithOverflow` builtin.
+        /// `operand` is payload index to `BinNode`.
+        /// `small` is unused.
+        mul_with_overflow,
+        /// Implements the `@shlWithOverflow` builtin.
+        /// `operand` is payload index to `BinNode`.
+        /// `small` is unused.
+        shl_with_overflow,
+        /// `operand` is payload index to `UnNode`.
+        c_undef,
+        /// `operand` is payload index to `UnNode`.
+        c_include,
+        /// `operand` is payload index to `BinNode`.
+        c_define,
+        /// `operand` is payload index to `UnNode`.
+        wasm_memory_size,
+        /// `operand` is payload index to `BinNode`.
+        wasm_memory_grow,
+        /// The `@prefetch` builtin.
+        /// `operand` is payload index to `BinNode`.
+        prefetch,
+        /// Given a pointer to a struct or object that contains virtual fields, returns the
+        /// named field. If there is no named field, searches in the type for a decl that
+        /// matches the field name. The decl is resolved and we ensure that it's a function
+        /// which can accept the object as the first parameter, with one pointer fixup. If
+        /// all of that works, this instruction produces a special "bound function" value
+        /// which contains both the function and the saved first parameter value.
+        /// Bound functions may only be used as the function parameter to a `call` or
+        /// `builtin_call` instruction. Any other use is invalid zir and may crash the compiler.
+        /// Uses `pl_node` field. The AST node is the `@field` builtin. Payload is FieldNamedNode.
+        field_call_bind_named,
+        /// Implements the `@fence` builtin.
+        /// `operand` is payload index to `UnNode`.
+        fence,
+        /// Implement builtin `@setFloatMode`.
+        /// `operand` is payload index to `UnNode`.
+        set_float_mode,
+        /// Implement builtin `@setAlignStack`.
+        /// `operand` is payload index to `UnNode`.
+        set_align_stack,
+        /// Implements `@setCold`.
+        /// `operand` is payload index to `UnNode`.
+        set_cold,
+        /// Implements the `@errSetCast` builtin.
+        /// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand.
+        err_set_cast,
+        /// `operand` is payload index to `UnNode`.
+        await_nosuspend,
+        /// Implements `@breakpoint`.
+        /// `operand` is `src_node: i32`.
+        breakpoint,
+        /// Implements the `@select` builtin.
+        /// `operand` is payload index to `Select`.
+        select,
+        /// Implement builtin `@errToInt`.
+        /// `operand` is payload index to `UnNode`.
+        error_to_int,
+        /// Implement builtin `@intToError`.
+        /// `operand` is payload index to `UnNode`.
+        int_to_error,
+        /// Implement builtin `@Type`.
+        /// `operand` is payload index to `UnNode`.
+        /// `small` contains `NameStrategy`.
+        reify,
+        /// Implements the `@asyncCall` builtin.
+        /// `operand` is payload index to `AsyncCall`.
+        builtin_async_call,
+        /// Implements the `@cmpxchgStrong` and `@cmpxchgWeak` builtins.
+        /// `small` 0=>weak 1=>strong
+        /// `operand` is payload index to `Cmpxchg`.
+        cmpxchg,
+        /// Implements the `@addrSpaceCast` builtin.
+        /// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand.
+        addrspace_cast,
+        /// Implement builtin `@cVaArg`.
+        /// `operand` is payload index to `BinNode`.
+        c_va_arg,
+        /// Implement builtin `@cVaCopy`.
+        /// `operand` is payload index to `UnNode`.
+        c_va_copy,
+        /// Implement builtin `@cVaEnd`.
+        /// `operand` is payload index to `UnNode`.
+        c_va_end,
+        /// Implement builtin `@cVaStart`.
+        /// `operand` is `src_node: i32`.
+        c_va_start,
+        /// Implements the `@constCast` builtin.
+        /// `operand` is payload index to `UnNode`.
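+        // Illustrative sketch of how an extended opcode is decoded: the instruction
+        // itself has `tag == .extended`, and `Data.extended` carries the `Extended`
+        // opcode together with the 16-bit `small` and 32-bit `operand` fields, e.g.
+        //
+        //     const extended = datas[inst].extended;
+        //     switch (extended.opcode) {
+        //         .const_cast => {
+        //             const un = code.extraData(Inst.UnNode, extended.operand).data;
+        //             // un.operand is the value being cast
+        //         },
+        //         else => {},
+        //     }
+        //
+        // Sketch assuming `datas = code.instructions.items(.data)` and that `UnNode`
+        // carries an `operand` field, as the doc comments above suggest.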
+ const_cast, + /// Implements the `@volatileCast` builtin. + /// `operand` is payload index to `UnNode`. + volatile_cast, + /// Implements the `@workItemId` builtin. + /// `operand` is payload index to `UnNode`. + work_item_id, + /// Implements the `@workGroupSize` builtin. + /// `operand` is payload index to `UnNode`. + work_group_size, + /// Implements the `@workGroupId` builtin. + /// `operand` is payload index to `UnNode`. + work_group_id, + + pub const InstData = struct { + opcode: Extended, + small: u16, + operand: u32, + }; + }; + + /// The position of a ZIR instruction within the `Zir` instructions array. + pub const Index = u32; + + /// A reference to a TypedValue or ZIR instruction. + /// + /// If the Ref has a tag in this enum, it refers to a TypedValue which may be + /// retrieved with Ref.toTypedValue(). + /// + /// If the value of a Ref does not have a tag, it refers to a ZIR instruction. + /// + /// The first values after the the last tag refer to ZIR instructions which may + /// be derived by subtracting `typed_value_map.len`. + /// + /// When adding a tag to this enum, consider adding a corresponding entry to + /// `primitives` in astgen. + /// + /// The tag type is specified so that it is safe to bitcast between `[]u32` + /// and `[]Ref`. + pub const Ref = enum(u32) { + /// This Ref does not correspond to any ZIR instruction or constant + /// value and may instead be used as a sentinel to indicate null. + none, + + u1_type, + u8_type, + i8_type, + u16_type, + i16_type, + u29_type, + u32_type, + i32_type, + u64_type, + i64_type, + u128_type, + i128_type, + usize_type, + isize_type, + c_short_type, + c_ushort_type, + c_int_type, + c_uint_type, + c_long_type, + c_ulong_type, + c_longlong_type, + c_ulonglong_type, + c_longdouble_type, + f16_type, + f32_type, + f64_type, + f80_type, + f128_type, + anyopaque_type, + bool_type, + void_type, + type_type, + anyerror_type, + comptime_int_type, + comptime_float_type, + noreturn_type, + anyframe_type, + null_type, + undefined_type, + enum_literal_type, + atomic_order_type, + atomic_rmw_op_type, + calling_convention_type, + address_space_type, + float_mode_type, + reduce_op_type, + modifier_type, + prefetch_options_type, + export_options_type, + extern_options_type, + type_info_type, + manyptr_u8_type, + manyptr_const_u8_type, + fn_noreturn_no_args_type, + fn_void_no_args_type, + fn_naked_noreturn_no_args_type, + fn_ccc_void_no_args_type, + single_const_pointer_to_comptime_int_type, + const_slice_u8_type, + anyerror_void_error_union_type, + generic_poison_type, + + /// `undefined` (untyped) + undef, + /// `0` (comptime_int) + zero, + /// `1` (comptime_int) + one, + /// `{}` + void_value, + /// `unreachable` (noreturn type) + unreachable_value, + /// `null` (untyped) + null_value, + /// `true` + bool_true, + /// `false` + bool_false, + /// `.{}` (untyped) + empty_struct, + /// `0` (usize) + zero_usize, + /// `1` (usize) + one_usize, + /// `std.builtin.CallingConvention.C` + calling_convention_c, + /// `std.builtin.CallingConvention.Inline` + calling_convention_inline, + /// Used for generic parameters where the type and value + /// is not known until generic function instantiation. + generic_poison, + + ref_start_index, + + _, + }; + + /// All instructions have an 8-byte payload, which is contained within + /// this union. `Tag` determines which union field is active, as well as + /// how to interpret the data within. + pub const Data = union { + /// Used for `Tag.extended`. 
The extended opcode determines the meaning + /// of the `small` and `operand` fields. + extended: Extended.InstData, + /// Used for unary operators, with an AST node source location. + un_node: struct { + /// Offset from Decl AST node index. + src_node: i32, + /// The meaning of this operand depends on the corresponding `Tag`. + operand: Ref, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + /// Used for unary operators, with a token source location. + un_tok: struct { + /// Offset from Decl AST token index. + src_tok: Ast.TokenIndex, + /// The meaning of this operand depends on the corresponding `Tag`. + operand: Ref, + + pub fn src(self: @This()) LazySrcLoc { + return .{ .token_offset = self.src_tok }; + } + }, + pl_node: struct { + /// Offset from Decl AST node index. + /// `Tag` determines which kind of AST node this points to. + src_node: i32, + /// index into extra. + /// `Tag` determines what lives there. + payload_index: u32, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + pl_tok: struct { + /// Offset from Decl AST token index. + src_tok: Ast.TokenIndex, + /// index into extra. + /// `Tag` determines what lives there. + payload_index: u32, + + pub fn src(self: @This()) LazySrcLoc { + return .{ .token_offset = self.src_tok }; + } + }, + bin: Bin, + /// For strings which may contain null bytes. + str: struct { + /// Offset into `string_bytes`. + start: u32, + /// Number of bytes in the string. + len: u32, + + pub fn get(self: @This(), code: Zir) []const u8 { + return code.string_bytes[self.start..][0..self.len]; + } + }, + str_tok: struct { + /// Offset into `string_bytes`. Null-terminated. + start: u32, + /// Offset from Decl AST token index. + src_tok: u32, + + pub fn get(self: @This(), code: Zir) [:0]const u8 { + return code.nullTerminatedString(self.start); + } + + pub fn src(self: @This()) LazySrcLoc { + return .{ .token_offset = self.src_tok }; + } + }, + /// Offset from Decl AST token index. + tok: Ast.TokenIndex, + /// Offset from Decl AST node index. + node: i32, + int: u64, + float: f64, + ptr_type: struct { + flags: packed struct { + is_allowzero: bool, + is_mutable: bool, + is_volatile: bool, + has_sentinel: bool, + has_align: bool, + has_addrspace: bool, + has_bit_range: bool, + _: u1 = undefined, + }, + size: std.builtin.Type.Pointer.Size, + /// Index into extra. See `PtrType`. + payload_index: u32, + }, + int_type: struct { + /// Offset from Decl AST node index. + /// `Tag` determines which kind of AST node this points to. + src_node: i32, + signedness: std.builtin.Signedness, + bit_count: u16, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + bool_br: struct { + lhs: Ref, + /// Points to a `Block`. + payload_index: u32, + }, + @"unreachable": struct { + /// Offset from Decl AST node index. + /// `Tag` determines which kind of AST node this points to. + src_node: i32, + force_comptime: bool, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + @"break": struct { + operand: Ref, + payload_index: u32, + }, + switch_capture: struct { + switch_inst: Index, + prong_index: u32, + }, + dbg_stmt: LineColumn, + /// Used for unary operators which reference an inst, + /// with an AST node source location. + inst_node: struct { + /// Offset from Decl AST node index. + src_node: i32, + /// The meaning of this operand depends on the corresponding `Tag`. 
+ inst: Index, + + pub fn src(self: @This()) LazySrcLoc { + return LazySrcLoc.nodeOffset(self.src_node); + } + }, + str_op: struct { + /// Offset into `string_bytes`. Null-terminated. + str: u32, + operand: Ref, + + pub fn getStr(self: @This(), zir: Zir) [:0]const u8 { + return zir.nullTerminatedString(self.str); + } + }, + @"defer": struct { + index: u32, + len: u32, + }, + defer_err_code: struct { + err_code: Ref, + payload_index: u32, + }, + save_err_ret_index: struct { + operand: Ref, // If error type (or .none), save new trace index + }, + restore_err_ret_index: struct { + block: Ref, // If restored, the index is from this block's entrypoint + operand: Ref, // If non-error (or .none), then restore the index + }, + + // Make sure we don't accidentally add a field to make this union + // bigger than expected. Note that in Debug builds, Zig is allowed + // to insert a secret field for safety checks. + comptime { + if (builtin.mode != .Debug and builtin.mode != .ReleaseSafe) { + assert(@sizeOf(Data) == 8); + } + } + + /// TODO this has to be kept in sync with `Data` which we want to be an untagged + /// union. There is some kind of language awkwardness here and it has to do with + /// deserializing an untagged union (in this case `Data`) from a file, and trying + /// to preserve the hidden safety field. + pub const FieldEnum = enum { + extended, + un_node, + un_tok, + pl_node, + pl_tok, + bin, + str, + str_tok, + tok, + node, + int, + float, + ptr_type, + int_type, + bool_br, + @"unreachable", + @"break", + switch_capture, + dbg_stmt, + inst_node, + str_op, + @"defer", + defer_err_code, + save_err_ret_index, + restore_err_ret_index, + }; + }; + + pub const Break = struct { + pub const no_src_node = std.math.maxInt(i32); + + block_inst: Index, + operand_src_node: i32, + }; + + /// Trailing: + /// 0. Output for every outputs_len + /// 1. Input for every inputs_len + /// 2. clobber: u32 // index into string_bytes (null terminated) for every clobbers_len. + pub const Asm = struct { + src_node: i32, + // null-terminated string index + asm_source: u32, + /// 1 bit for each outputs_len: whether it uses `-> T` or not. + /// 0b0 - operand is a pointer to where to store the output. + /// 0b1 - operand is a type; asm expression has the output as the result. + /// 0b0X is the first output, 0bX0 is the second, etc. + output_type_bits: u32, + + pub const Output = struct { + /// index into string_bytes (null terminated) + name: u32, + /// index into string_bytes (null terminated) + constraint: u32, + /// How to interpret this is determined by `output_type_bits`. + operand: Ref, + }; + + pub const Input = struct { + /// index into string_bytes (null terminated) + name: u32, + /// index into string_bytes (null terminated) + constraint: u32, + operand: Ref, + }; + }; + + /// Trailing: + /// if (ret_body_len == 1) { + /// 0. return_type: Ref + /// } + /// if (ret_body_len > 1) { + /// 1. return_type: Index // for each ret_body_len + /// } + /// 2. body: Index // for each body_len + /// 3. src_locs: SrcLocs // if body_len != 0 + pub const Func = struct { + /// If this is 0 it means a void return type. + /// If this is 1 it means return_type is a simple Ref + ret_body_len: u32, + /// Points to the block that contains the param instructions for this function. + param_block: Index, + body_len: u32, + + pub const SrcLocs = struct { + /// Line index in the source file relative to the parent decl. + lbrace_line: u32, + /// Line index in the source file relative to the parent decl. 
+ rbrace_line: u32, + /// lbrace_column is least significant bits u16 + /// rbrace_column is most significant bits u16 + columns: u32, + }; + }; + + /// Trailing: + /// 0. lib_name: u32, // null terminated string index, if has_lib_name is set + /// if (has_align_ref and !has_align_body) { + /// 1. align: Ref, + /// } + /// if (has_align_body) { + /// 2. align_body_len: u32 + /// 3. align_body: u32 // for each align_body_len + /// } + /// if (has_addrspace_ref and !has_addrspace_body) { + /// 4. addrspace: Ref, + /// } + /// if (has_addrspace_body) { + /// 5. addrspace_body_len: u32 + /// 6. addrspace_body: u32 // for each addrspace_body_len + /// } + /// if (has_section_ref and !has_section_body) { + /// 7. section: Ref, + /// } + /// if (has_section_body) { + /// 8. section_body_len: u32 + /// 9. section_body: u32 // for each section_body_len + /// } + /// if (has_cc_ref and !has_cc_body) { + /// 10. cc: Ref, + /// } + /// if (has_cc_body) { + /// 11. cc_body_len: u32 + /// 12. cc_body: u32 // for each cc_body_len + /// } + /// if (has_ret_ty_ref and !has_ret_ty_body) { + /// 13. ret_ty: Ref, + /// } + /// if (has_ret_ty_body) { + /// 14. ret_ty_body_len: u32 + /// 15. ret_ty_body: u32 // for each ret_ty_body_len + /// } + /// 16. noalias_bits: u32 // if has_any_noalias + /// - each bit starting with LSB corresponds to parameter indexes + /// 17. body: Index // for each body_len + /// 18. src_locs: Func.SrcLocs // if body_len != 0 + pub const FuncFancy = struct { + /// Points to the block that contains the param instructions for this function. + param_block: Index, + body_len: u32, + bits: Bits, + + /// If both has_cc_ref and has_cc_body are false, it means auto calling convention. + /// If both has_align_ref and has_align_body are false, it means default alignment. + /// If both has_ret_ty_ref and has_ret_ty_body are false, it means void return type. + /// If both has_section_ref and has_section_body are false, it means default section. + /// If both has_addrspace_ref and has_addrspace_body are false, it means default addrspace. + pub const Bits = packed struct { + is_var_args: bool, + is_inferred_error: bool, + is_test: bool, + is_extern: bool, + is_noinline: bool, + has_align_ref: bool, + has_align_body: bool, + has_addrspace_ref: bool, + has_addrspace_body: bool, + has_section_ref: bool, + has_section_body: bool, + has_cc_ref: bool, + has_cc_body: bool, + has_ret_ty_ref: bool, + has_ret_ty_body: bool, + has_lib_name: bool, + has_any_noalias: bool, + _: u15 = undefined, + }; + }; + + /// Trailing: + /// 0. lib_name: u32, // null terminated string index, if has_lib_name is set + /// 1. align: Ref, // if has_align is set + /// 2. init: Ref // if has_init is set + /// The source node is obtained from the containing `block_inline`. + pub const ExtendedVar = struct { + var_type: Ref, + + pub const Small = packed struct { + has_lib_name: bool, + has_align: bool, + has_init: bool, + is_extern: bool, + is_threadlocal: bool, + _: u11 = undefined, + }; + }; + + /// This data is stored inside extra, with trailing operands according to `operands_len`. + /// Each operand is a `Ref`. + pub const MultiOp = struct { + operands_len: u32, + }; + + /// Trailing: operand: Ref, // for each `operands_len` (stored in `small`). + pub const NodeMultiOp = struct { + src_node: i32, + }; + + /// This data is stored inside extra, with trailing operands according to `body_len`. + /// Each operand is an `Index`. 
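+ /// Readers typically reconstruct the body with the usual trailing-data
+ /// pattern, as done in `findDeclsInner` further below:
+ ///   const extra = zir.extraData(Inst.Block, payload_index);
+ ///   const body = zir.extra[extra.end..][0..extra.data.body_len];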
+ pub const Block = struct { + body_len: u32, + }; + + /// Stored inside extra, with trailing arguments according to `args_len`. + /// Implicit 0. arg_0_start: u32, // always same as `args_len` + /// 1. arg_end: u32, // for each `args_len` + /// arg_N_start is the same as arg_N-1_end + pub const Call = struct { + // Note: Flags *must* come first so that unusedResultExpr + // can find it when it goes to modify them. + flags: Flags, + callee: Ref, + + pub const Flags = packed struct { + /// std.builtin.CallModifier in packed form + pub const PackedModifier = u3; + pub const PackedArgsLen = u27; + + packed_modifier: PackedModifier, + ensure_result_used: bool = false, + pop_error_return_trace: bool, + args_len: PackedArgsLen, + + comptime { + if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32) + @compileError("Layout of Call.Flags needs to be updated!"); + if (@bitSizeOf(std.builtin.CallModifier) != @bitSizeOf(PackedModifier)) + @compileError("Call.Flags.PackedModifier needs to be updated!"); + } + }; + }; + + pub const TypeOfPeer = struct { + src_node: i32, + body_len: u32, + body_index: u32, + }; + + pub const BuiltinCall = struct { + // Note: Flags *must* come first so that unusedResultExpr + // can find it when it goes to modify them. + flags: Flags, + modifier: Ref, + callee: Ref, + args: Ref, + + pub const Flags = packed struct { + is_nosuspend: bool, + is_comptime: bool, + ensure_result_used: bool, + _: u29 = undefined, + + comptime { + if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32) + @compileError("Layout of BuiltinCall.Flags needs to be updated!"); + } + }; + }; + + /// This data is stored inside extra, with two sets of trailing `Ref`: + /// * 0. the then body, according to `then_body_len`. + /// * 1. the else body, according to `else_body_len`. + pub const CondBr = struct { + condition: Ref, + then_body_len: u32, + else_body_len: u32, + }; + + /// This data is stored inside extra, trailed by: + /// * 0. body: Index // for each `body_len`. + pub const Try = struct { + /// The error union to unwrap. + operand: Ref, + body_len: u32, + }; + + /// Stored in extra. Depending on the flags in Data, there will be up to 5 + /// trailing Ref fields: + /// 0. sentinel: Ref // if `has_sentinel` flag is set + /// 1. align: Ref // if `has_align` flag is set + /// 2. address_space: Ref // if `has_addrspace` flag is set + /// 3. bit_start: Ref // if `has_bit_range` flag is set + /// 4. host_size: Ref // if `has_bit_range` flag is set + pub const PtrType = struct { + elem_type: Ref, + src_node: i32, + }; + + pub const ArrayTypeSentinel = struct { + len: Ref, + sentinel: Ref, + elem_type: Ref, + }; + + pub const SliceStart = struct { + lhs: Ref, + start: Ref, + }; + + pub const SliceEnd = struct { + lhs: Ref, + start: Ref, + end: Ref, + }; + + pub const SliceSentinel = struct { + lhs: Ref, + start: Ref, + end: Ref, + sentinel: Ref, + }; + + /// The meaning of these operands depends on the corresponding `Tag`. + pub const Bin = struct { + lhs: Ref, + rhs: Ref, + }; + + pub const BinNode = struct { + node: i32, + lhs: Ref, + rhs: Ref, + }; + + pub const UnNode = struct { + node: i32, + operand: Ref, + }; + + pub const ElemPtrImm = struct { + ptr: Ref, + index: u32, + }; + + /// 0. multi_cases_len: u32 // If has_multi_cases is set. + /// 1. else_body { // If has_else or has_under is set. + /// body_len: u32, + /// body member Index for every body_len + /// } + /// 2. 
scalar_cases: { // for every scalar_cases_len + /// item: Ref, + /// body_len: u32, + /// body member Index for every body_len + /// } + /// 3. multi_cases: { // for every multi_cases_len + /// items_len: u32, + /// ranges_len: u32, + /// body_len: u32, + /// item: Ref // for every items_len + /// ranges: { // for every ranges_len + /// item_first: Ref, + /// item_last: Ref, + /// } + /// body member Index for every body_len + /// } + pub const SwitchBlock = struct { + /// This is always a `switch_cond` or `switch_cond_ref` instruction. + /// If it is a `switch_cond_ref` instruction, bits.is_ref is always true. + /// If it is a `switch_cond` instruction, bits.is_ref is always false. + /// Both `switch_cond` and `switch_cond_ref` return a value, not a pointer, + /// that is useful for the case items, but cannot be used for capture values. + /// For the capture values, Sema is expected to find the operand of this operand + /// and use that. + operand: Ref, + bits: Bits, + + pub const Bits = packed struct { + /// If true, one or more prongs have multiple items. + has_multi_cases: bool, + /// If true, there is an else prong. This is mutually exclusive with `has_under`. + has_else: bool, + /// If true, there is an underscore prong. This is mutually exclusive with `has_else`. + has_under: bool, + scalar_cases_len: ScalarCasesLen, + + pub const ScalarCasesLen = u29; + + pub fn specialProng(bits: Bits) SpecialProng { + const has_else: u2 = @boolToInt(bits.has_else); + const has_under: u2 = @boolToInt(bits.has_under); + return switch ((has_else << 1) | has_under) { + 0b00 => .none, + 0b01 => .under, + 0b10 => .@"else", + 0b11 => unreachable, + }; + } + }; + + pub const ScalarProng = struct { + item: Ref, + body: []const Index, + }; + + /// TODO performance optimization: instead of having this helper method + /// change the definition of switch_capture instruction to store extra_index + /// instead of prong_index. This way, Sema won't be doing O(N^2) iterations + /// over the switch prongs. 
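+ /// The walk below mirrors the trailing layout documented above: skip the
+ /// optional `multi_cases_len` word, skip the special prong body if one is
+ /// present, then step through scalar prongs (`item`, `body_len`, body)
+ /// until `prong_index` is reached.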
+ pub fn getScalarProng( + self: SwitchBlock, + zir: Zir, + extra_end: usize, + prong_index: usize, + ) ScalarProng { + var extra_index: usize = extra_end; + + if (self.bits.has_multi_cases) { + extra_index += 1; + } + + if (self.bits.specialProng() != .none) { + const body_len = @truncate(u31, zir.extra[extra_index]); + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + extra_index += body.len; + } + + var scalar_i: usize = 0; + while (true) : (scalar_i += 1) { + const item = @intToEnum(Ref, zir.extra[extra_index]); + extra_index += 1; + const body_len = @truncate(u31, zir.extra[extra_index]); + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + extra_index += body.len; + + if (scalar_i < prong_index) continue; + + return .{ + .item = item, + .body = body, + }; + } + } + + pub const MultiProng = struct { + items: []const Ref, + body: []const Index, + }; + + pub fn getMultiProng( + self: SwitchBlock, + zir: Zir, + extra_end: usize, + prong_index: usize, + ) MultiProng { + // +1 for self.bits.has_multi_cases == true + var extra_index: usize = extra_end + 1; + + if (self.bits.specialProng() != .none) { + const body_len = @truncate(u31, zir.extra[extra_index]); + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + extra_index += body.len; + } + + var scalar_i: usize = 0; + while (scalar_i < self.bits.scalar_cases_len) : (scalar_i += 1) { + extra_index += 1; + const body_len = @truncate(u31, zir.extra[extra_index]); + extra_index += 1; + extra_index += body_len; + } + var multi_i: u32 = 0; + while (true) : (multi_i += 1) { + const items_len = zir.extra[extra_index]; + extra_index += 1; + const ranges_len = zir.extra[extra_index]; + extra_index += 1; + const body_len = @truncate(u31, zir.extra[extra_index]); + extra_index += 1; + const items = zir.refSlice(extra_index, items_len); + extra_index += items_len; + // Each range has a start and an end. + extra_index += 2 * ranges_len; + + const body = zir.extra[extra_index..][0..body_len]; + extra_index += body_len; + + if (multi_i < prong_index) continue; + return .{ + .items = items, + .body = body, + }; + } + } + }; + + pub const Field = struct { + lhs: Ref, + /// Offset into `string_bytes`. + field_name_start: u32, + }; + + pub const FieldNamed = struct { + lhs: Ref, + field_name: Ref, + }; + + pub const FieldNamedNode = struct { + node: i32, + lhs: Ref, + field_name: Ref, + }; + + pub const As = struct { + dest_type: Ref, + operand: Ref, + }; + + /// Trailing: + /// 0. src_node: i32, // if has_src_node + /// 1. fields_len: u32, // if has_fields_len + /// 2. decls_len: u32, // if has_decls_len + /// 3. backing_int_body_len: u32, // if has_backing_int + /// 4. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0 + /// 5. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0 + /// 6. decl_bits: u32 // for every 8 decls + /// - sets of 4 bits: + /// 0b000X: whether corresponding decl is pub + /// 0b00X0: whether corresponding decl is exported + /// 0b0X00: whether corresponding decl has an align expression + /// 0bX000: whether corresponding decl has a linksection or an address space expression + /// 7. decl: { // for every decls_len + /// src_hash: [4]u32, // hash of source bytes + /// line: u32, // line number of decl, relative to parent + /// name: u32, // null terminated string index + /// - 0 means comptime or usingnamespace decl. 
+ /// - if name == 0 `is_exported` determines which one: 0=comptime,1=usingnamespace + /// - 1 means test decl with no name. + /// - 2 means that the test is a decltest, doc_comment gives the name of the identifier + /// - if there is a 0 byte at the position `name` indexes, it indicates + /// this is a test decl, and the name starts at `name+1`. + /// value: Index, + /// doc_comment: u32, 0 if no doc comment, if this is a decltest, doc_comment references the decl name in the string table + /// align: Ref, // if corresponding bit is set + /// link_section_or_address_space: { // if corresponding bit is set. + /// link_section: Ref, + /// address_space: Ref, + /// } + /// } + /// 8. flags: u32 // for every 8 fields + /// - sets of 4 bits: + /// 0b000X: whether corresponding field has an align expression + /// 0b00X0: whether corresponding field has a default expression + /// 0b0X00: whether corresponding field is comptime + /// 0bX000: whether corresponding field has a type expression + /// 9. fields: { // for every fields_len + /// field_name: u32, // if !is_tuple + /// doc_comment: u32, // 0 if no doc comment + /// field_type: Ref, // if corresponding bit is not set. none means anytype. + /// field_type_body_len: u32, // if corresponding bit is set + /// align_body_len: u32, // if corresponding bit is set + /// init_body_len: u32, // if corresponding bit is set + /// } + /// 10. bodies: { // for every fields_len + /// field_type_body_inst: Inst, // for each field_type_body_len + /// align_body_inst: Inst, // for each align_body_len + /// init_body_inst: Inst, // for each init_body_len + /// } + pub const StructDecl = struct { + pub const Small = packed struct { + has_src_node: bool, + has_fields_len: bool, + has_decls_len: bool, + has_backing_int: bool, + known_non_opv: bool, + known_comptime_only: bool, + is_tuple: bool, + name_strategy: NameStrategy, + layout: std.builtin.Type.ContainerLayout, + _: u5 = undefined, + }; + }; + + pub const NameStrategy = enum(u2) { + /// Use the same name as the parent declaration name. + /// e.g. `const Foo = struct {...};`. + parent, + /// Use the name of the currently executing comptime function call, + /// with the current parameters. e.g. `ArrayList(i32)`. + func, + /// Create an anonymous name for this declaration. + /// Like this: "ParentDeclName_struct_69" + anon, + /// Use the name specified in the next `dbg_var_{val,ptr}` instruction. + dbg_var, + }; + + /// Trailing: + /// 0. src_node: i32, // if has_src_node + /// 1. tag_type: Ref, // if has_tag_type + /// 2. body_len: u32, // if has_body_len + /// 3. fields_len: u32, // if has_fields_len + /// 4. decls_len: u32, // if has_decls_len + /// 5. decl_bits: u32 // for every 8 decls + /// - sets of 4 bits: + /// 0b000X: whether corresponding decl is pub + /// 0b00X0: whether corresponding decl is exported + /// 0b0X00: whether corresponding decl has an align expression + /// 0bX000: whether corresponding decl has a linksection or an address space expression + /// 6. decl: { // for every decls_len + /// src_hash: [4]u32, // hash of source bytes + /// line: u32, // line number of decl, relative to parent + /// name: u32, // null terminated string index + /// - 0 means comptime or usingnamespace decl. + /// - if name == 0 `is_exported` determines which one: 0=comptime,1=usingnamespace + /// - 1 means test decl with no name. + /// - if there is a 0 byte at the position `name` indexes, it indicates + /// this is a test decl, and the name starts at `name+1`. 
+ /// value: Index, + /// doc_comment: u32, // 0 if no doc_comment + /// align: Ref, // if corresponding bit is set + /// link_section_or_address_space: { // if corresponding bit is set. + /// link_section: Ref, + /// address_space: Ref, + /// } + /// } + /// 7. inst: Index // for every body_len + /// 8. has_bits: u32 // for every 32 fields + /// - the bit is whether corresponding field has an value expression + /// 9. fields: { // for every fields_len + /// field_name: u32, + /// doc_comment: u32, // 0 if no doc_comment + /// value: Ref, // if corresponding bit is set + /// } + pub const EnumDecl = struct { + pub const Small = packed struct { + has_src_node: bool, + has_tag_type: bool, + has_body_len: bool, + has_fields_len: bool, + has_decls_len: bool, + name_strategy: NameStrategy, + nonexhaustive: bool, + _: u8 = undefined, + }; + }; + + /// Trailing: + /// 0. src_node: i32, // if has_src_node + /// 1. tag_type: Ref, // if has_tag_type + /// 2. body_len: u32, // if has_body_len + /// 3. fields_len: u32, // if has_fields_len + /// 4. decls_len: u32, // if has_decls_len + /// 5. decl_bits: u32 // for every 8 decls + /// - sets of 4 bits: + /// 0b000X: whether corresponding decl is pub + /// 0b00X0: whether corresponding decl is exported + /// 0b0X00: whether corresponding decl has an align expression + /// 0bX000: whether corresponding decl has a linksection or an address space expression + /// 6. decl: { // for every decls_len + /// src_hash: [4]u32, // hash of source bytes + /// line: u32, // line number of decl, relative to parent + /// name: u32, // null terminated string index + /// - 0 means comptime or usingnamespace decl. + /// - if name == 0 `is_exported` determines which one: 0=comptime,1=usingnamespace + /// - 1 means test decl with no name. + /// - if there is a 0 byte at the position `name` indexes, it indicates + /// this is a test decl, and the name starts at `name+1`. + /// value: Index, + /// doc_comment: u32, // 0 if no doc comment + /// align: Ref, // if corresponding bit is set + /// link_section_or_address_space: { // if corresponding bit is set. + /// link_section: Ref, + /// address_space: Ref, + /// } + /// } + /// 7. inst: Index // for every body_len + /// 8. has_bits: u32 // for every 8 fields + /// - sets of 4 bits: + /// 0b000X: whether corresponding field has a type expression + /// 0b00X0: whether corresponding field has a align expression + /// 0b0X00: whether corresponding field has a tag value expression + /// 0bX000: unused + /// 9. fields: { // for every fields_len + /// field_name: u32, // null terminated string index + /// doc_comment: u32, // 0 if no doc comment + /// field_type: Ref, // if corresponding bit is set + /// - if none, means `anytype`. + /// align: Ref, // if corresponding bit is set + /// tag_value: Ref, // if corresponding bit is set + /// } + pub const UnionDecl = struct { + pub const Small = packed struct { + has_src_node: bool, + has_tag_type: bool, + has_body_len: bool, + has_fields_len: bool, + has_decls_len: bool, + name_strategy: NameStrategy, + layout: std.builtin.Type.ContainerLayout, + /// has_tag_type | auto_enum_tag | result + /// ------------------------------------- + /// false | false | union { } + /// false | true | union(enum) { } + /// true | true | union(enum(T)) { } + /// true | false | union(T) { } + auto_enum_tag: bool, + _: u6 = undefined, + }; + }; + + /// Trailing: + /// 0. src_node: i32, // if has_src_node + /// 1. decls_len: u32, // if has_decls_len + /// 2. 
decl_bits: u32 // for every 8 decls + /// - sets of 4 bits: + /// 0b000X: whether corresponding decl is pub + /// 0b00X0: whether corresponding decl is exported + /// 0b0X00: whether corresponding decl has an align expression + /// 0bX000: whether corresponding decl has a linksection or an address space expression + /// 3. decl: { // for every decls_len + /// src_hash: [4]u32, // hash of source bytes + /// line: u32, // line number of decl, relative to parent + /// name: u32, // null terminated string index + /// - 0 means comptime or usingnamespace decl. + /// - if name == 0 `is_exported` determines which one: 0=comptime,1=usingnamespace + /// - 1 means test decl with no name. + /// - if there is a 0 byte at the position `name` indexes, it indicates + /// this is a test decl, and the name starts at `name+1`. + /// value: Index, + /// doc_comment: u32, // 0 if no doc comment, + /// align: Ref, // if corresponding bit is set + /// link_section_or_address_space: { // if corresponding bit is set. + /// link_section: Ref, + /// address_space: Ref, + /// } + /// } + pub const OpaqueDecl = struct { + pub const Small = packed struct { + has_src_node: bool, + has_decls_len: bool, + name_strategy: NameStrategy, + _: u12 = undefined, + }; + }; + + /// Trailing: + /// { // for every fields_len + /// field_name: u32 // null terminated string index + /// doc_comment: u32 // null terminated string index + /// } + pub const ErrorSetDecl = struct { + fields_len: u32, + }; + + /// A f128 value, broken up into 4 u32 parts. + pub const Float128 = struct { + piece0: u32, + piece1: u32, + piece2: u32, + piece3: u32, + + pub fn get(self: Float128) f128 { + const int_bits = @as(u128, self.piece0) | + (@as(u128, self.piece1) << 32) | + (@as(u128, self.piece2) << 64) | + (@as(u128, self.piece3) << 96); + return @bitCast(f128, int_bits); + } + }; + + /// Trailing is an item per field. + pub const StructInit = struct { + fields_len: u32, + + pub const Item = struct { + /// The `field_type` ZIR instruction for this field init. + field_type: Index, + /// The field init expression to be used as the field value. + init: Ref, + }; + }; + + /// Trailing is an Item per field. + /// TODO make this instead array of inits followed by array of names because + /// it will be simpler Sema code and better for CPU cache. + pub const StructInitAnon = struct { + fields_len: u32, + + pub const Item = struct { + /// Null-terminated string table index. + field_name: u32, + /// The field init expression to be used as the field value. + init: Ref, + }; + }; + + pub const FieldType = struct { + container_type: Ref, + /// Offset into `string_bytes`, null terminated. 
+ name_start: u32, + }; + + pub const FieldTypeRef = struct { + container_type: Ref, + field_name: Ref, + }; + + pub const Cmpxchg = struct { + node: i32, + ptr: Ref, + expected_value: Ref, + new_value: Ref, + success_order: Ref, + failure_order: Ref, + }; + + pub const AtomicRmw = struct { + ptr: Ref, + operation: Ref, + operand: Ref, + ordering: Ref, + }; + + pub const UnionInit = struct { + union_type: Ref, + field_name: Ref, + init: Ref, + }; + + pub const AtomicStore = struct { + ptr: Ref, + operand: Ref, + ordering: Ref, + }; + + pub const AtomicLoad = struct { + elem_type: Ref, + ptr: Ref, + ordering: Ref, + }; + + pub const MulAdd = struct { + mulend1: Ref, + mulend2: Ref, + addend: Ref, + }; + + pub const FieldParentPtr = struct { + parent_type: Ref, + field_name: Ref, + field_ptr: Ref, + }; + + pub const Memcpy = struct { + dest: Ref, + source: Ref, + byte_count: Ref, + }; + + pub const Memset = struct { + dest: Ref, + byte: Ref, + byte_count: Ref, + }; + + pub const Shuffle = struct { + elem_type: Ref, + a: Ref, + b: Ref, + mask: Ref, + }; + + pub const Select = struct { + node: i32, + elem_type: Ref, + pred: Ref, + a: Ref, + b: Ref, + }; + + pub const AsyncCall = struct { + node: i32, + frame_buffer: Ref, + result_ptr: Ref, + fn_ptr: Ref, + args: Ref, + }; + + /// Trailing: inst: Index // for every body_len + pub const Param = struct { + /// Null-terminated string index. + name: u32, + /// 0 if no doc comment + doc_comment: u32, + /// The body contains the type of the parameter. + body_len: u32, + }; + + /// Trailing: + /// 0. type_inst: Ref, // if small 0b000X is set + /// 1. align_inst: Ref, // if small 0b00X0 is set + pub const AllocExtended = struct { + src_node: i32, + + pub const Small = packed struct { + has_type: bool, + has_align: bool, + is_const: bool, + is_comptime: bool, + _: u12 = undefined, + }; + }; + + pub const Export = struct { + /// If present, this is referring to a Decl via field access, e.g. `a.b`. + /// If omitted, this is referring to a Decl via identifier, e.g. `a`. + namespace: Ref, + /// Null-terminated string index. + decl_name: u32, + options: Ref, + }; + + pub const ExportValue = struct { + /// The comptime value to export. + operand: Ref, + options: Ref, + }; + + /// Trailing: `CompileErrors.Item` for each `items_len`. + pub const CompileErrors = struct { + items_len: u32, + + /// Trailing: `note_payload_index: u32` for each `notes_len`. + /// It's a payload index of another `Item`. + pub const Item = struct { + /// null terminated string index + msg: u32, + node: Ast.Node.Index, + /// If node is 0 then this will be populated. + token: Ast.TokenIndex, + /// Can be used in combination with `token`. + byte_offset: u32, + /// 0 or a payload index of a `Block`, each is a payload + /// index of another `Item`. 
+ notes: u32, + + pub fn notesLen(item: Item, zir: Zir) u32 { + if (item.notes == 0) return 0; + const block = zir.extraData(Block, item.notes); + return block.data.body_len; + } + }; + }; + + /// Trailing: for each `imports_len` there is an Item + pub const Imports = struct { + imports_len: Inst.Index, + + pub const Item = struct { + /// null terminated string index + name: u32, + /// points to the import name + token: Ast.TokenIndex, + }; + }; + + pub const LineColumn = struct { + line: u32, + column: u32, + }; + + pub const ArrayInit = struct { + ty: Ref, + init_count: u32, + }; + + pub const Src = struct { + node: i32, + line: u32, + column: u32, + }; + + pub const DeferErrCode = struct { + remapped_err_code: Index, + index: u32, + len: u32, + }; +}; + +pub const SpecialProng = enum { none, @"else", under }; + +pub const DeclIterator = struct { + extra_index: usize, + bit_bag_index: usize, + cur_bit_bag: u32, + decl_i: u32, + decls_len: u32, + zir: Zir, + + pub const Item = struct { + name: [:0]const u8, + sub_index: u32, + }; + + pub fn next(it: *DeclIterator) ?Item { + if (it.decl_i >= it.decls_len) return null; + + if (it.decl_i % 8 == 0) { + it.cur_bit_bag = it.zir.extra[it.bit_bag_index]; + it.bit_bag_index += 1; + } + it.decl_i += 1; + + const flags = @truncate(u4, it.cur_bit_bag); + it.cur_bit_bag >>= 4; + + const sub_index = @intCast(u32, it.extra_index); + it.extra_index += 5; // src_hash(4) + line(1) + const name = it.zir.nullTerminatedString(it.zir.extra[it.extra_index]); + it.extra_index += 3; // name(1) + value(1) + doc_comment(1) + it.extra_index += @truncate(u1, flags >> 2); + it.extra_index += @truncate(u1, flags >> 3); + + return Item{ + .sub_index = sub_index, + .name = name, + }; + } +}; + +pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator { + const tags = zir.instructions.items(.tag); + const datas = zir.instructions.items(.data); + switch (tags[decl_inst]) { + // Functions are allowed and yield no iterations. + // There is one case matching this in the extended instruction set below. 
+ .func, .func_inferred, .func_fancy => return declIteratorInner(zir, 0, 0), + + .extended => { + const extended = datas[decl_inst].extended; + switch (extended.opcode) { + .struct_decl => { + const small = @bitCast(Inst.StructDecl.Small, extended.small); + var extra_index: usize = extended.operand; + extra_index += @boolToInt(small.has_src_node); + extra_index += @boolToInt(small.has_fields_len); + const decls_len = if (small.has_decls_len) decls_len: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :decls_len decls_len; + } else 0; + + if (small.has_backing_int) { + const backing_int_body_len = zir.extra[extra_index]; + extra_index += 1; // backing_int_body_len + if (backing_int_body_len == 0) { + extra_index += 1; // backing_int_ref + } else { + extra_index += backing_int_body_len; // backing_int_body_inst + } + } + + return declIteratorInner(zir, extra_index, decls_len); + }, + .enum_decl => { + const small = @bitCast(Inst.EnumDecl.Small, extended.small); + var extra_index: usize = extended.operand; + extra_index += @boolToInt(small.has_src_node); + extra_index += @boolToInt(small.has_tag_type); + extra_index += @boolToInt(small.has_body_len); + extra_index += @boolToInt(small.has_fields_len); + const decls_len = if (small.has_decls_len) decls_len: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :decls_len decls_len; + } else 0; + + return declIteratorInner(zir, extra_index, decls_len); + }, + .union_decl => { + const small = @bitCast(Inst.UnionDecl.Small, extended.small); + var extra_index: usize = extended.operand; + extra_index += @boolToInt(small.has_src_node); + extra_index += @boolToInt(small.has_tag_type); + extra_index += @boolToInt(small.has_body_len); + extra_index += @boolToInt(small.has_fields_len); + const decls_len = if (small.has_decls_len) decls_len: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :decls_len decls_len; + } else 0; + + return declIteratorInner(zir, extra_index, decls_len); + }, + .opaque_decl => { + const small = @bitCast(Inst.OpaqueDecl.Small, extended.small); + var extra_index: usize = extended.operand; + extra_index += @boolToInt(small.has_src_node); + const decls_len = if (small.has_decls_len) decls_len: { + const decls_len = zir.extra[extra_index]; + extra_index += 1; + break :decls_len decls_len; + } else 0; + + return declIteratorInner(zir, extra_index, decls_len); + }, + else => unreachable, + } + }, + else => unreachable, + } +} + +pub fn declIteratorInner(zir: Zir, extra_index: usize, decls_len: u32) DeclIterator { + const bit_bags_count = std.math.divCeil(usize, decls_len, 8) catch unreachable; + return .{ + .zir = zir, + .extra_index = extra_index + bit_bags_count, + .bit_bag_index = extra_index, + .cur_bit_bag = undefined, + .decl_i = 0, + .decls_len = decls_len, + }; +} + +/// The iterator would have to allocate memory anyway to iterate. So here we populate +/// an ArrayList as the result. +pub fn findDecls(zir: Zir, list: *std.ArrayList(Inst.Index), decl_sub_index: u32) !void { + const block_inst = zir.extra[decl_sub_index + 6]; + list.clearRetainingCapacity(); + + return zir.findDeclsInner(list, block_inst); +} + +fn findDeclsInner( + zir: Zir, + list: *std.ArrayList(Inst.Index), + inst: Inst.Index, +) Allocator.Error!void { + const tags = zir.instructions.items(.tag); + const datas = zir.instructions.items(.data); + + switch (tags[inst]) { + // Functions instructions are interesting and have a body. 
+ .func, + .func_inferred, + => { + try list.append(inst); + + const inst_data = datas[inst].pl_node; + const extra = zir.extraData(Inst.Func, inst_data.payload_index); + var extra_index: usize = extra.end; + switch (extra.data.ret_body_len) { + 0 => {}, + 1 => extra_index += 1, + else => { + const body = zir.extra[extra_index..][0..extra.data.ret_body_len]; + extra_index += body.len; + try zir.findDeclsBody(list, body); + }, + } + const body = zir.extra[extra_index..][0..extra.data.body_len]; + return zir.findDeclsBody(list, body); + }, + .func_fancy => { + try list.append(inst); + + const inst_data = datas[inst].pl_node; + const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index); + var extra_index: usize = extra.end; + extra_index += @boolToInt(extra.data.bits.has_lib_name); + + if (extra.data.bits.has_align_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + try zir.findDeclsBody(list, body); + extra_index += body.len; + } else if (extra.data.bits.has_align_ref) { + extra_index += 1; + } + + if (extra.data.bits.has_addrspace_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + try zir.findDeclsBody(list, body); + extra_index += body.len; + } else if (extra.data.bits.has_addrspace_ref) { + extra_index += 1; + } + + if (extra.data.bits.has_section_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + try zir.findDeclsBody(list, body); + extra_index += body.len; + } else if (extra.data.bits.has_section_ref) { + extra_index += 1; + } + + if (extra.data.bits.has_cc_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + try zir.findDeclsBody(list, body); + extra_index += body.len; + } else if (extra.data.bits.has_cc_ref) { + extra_index += 1; + } + + if (extra.data.bits.has_ret_ty_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + try zir.findDeclsBody(list, body); + extra_index += body.len; + } else if (extra.data.bits.has_ret_ty_ref) { + extra_index += 1; + } + + extra_index += @boolToInt(extra.data.bits.has_any_noalias); + + const body = zir.extra[extra_index..][0..extra.data.body_len]; + return zir.findDeclsBody(list, body); + }, + .extended => { + const extended = datas[inst].extended; + switch (extended.opcode) { + + // Decl instructions are interesting but have no body. + // TODO yes they do have a body actually. recurse over them just like block instructions. + .struct_decl, + .union_decl, + .enum_decl, + .opaque_decl, + => return list.append(inst), + + else => return, + } + }, + + // Block instructions, recurse over the bodies. 
+ + .block, .block_inline => { + const inst_data = datas[inst].pl_node; + const extra = zir.extraData(Inst.Block, inst_data.payload_index); + const body = zir.extra[extra.end..][0..extra.data.body_len]; + return zir.findDeclsBody(list, body); + }, + .condbr, .condbr_inline => { + const inst_data = datas[inst].pl_node; + const extra = zir.extraData(Inst.CondBr, inst_data.payload_index); + const then_body = zir.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = zir.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + try zir.findDeclsBody(list, then_body); + try zir.findDeclsBody(list, else_body); + }, + .@"try", .try_ptr => { + const inst_data = datas[inst].pl_node; + const extra = zir.extraData(Inst.Try, inst_data.payload_index); + const body = zir.extra[extra.end..][0..extra.data.body_len]; + try zir.findDeclsBody(list, body); + }, + .switch_block => return findDeclsSwitch(zir, list, inst), + + .suspend_block => @panic("TODO iterate suspend block"), + + else => return, // Regular instruction, not interesting. + } +} + +fn findDeclsSwitch( + zir: Zir, + list: *std.ArrayList(Inst.Index), + inst: Inst.Index, +) Allocator.Error!void { + const inst_data = zir.instructions.items(.data)[inst].pl_node; + const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index); + + var extra_index: usize = extra.end; + + const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: { + const multi_cases_len = zir.extra[extra_index]; + extra_index += 1; + break :blk multi_cases_len; + } else 0; + + const special_prong = extra.data.bits.specialProng(); + if (special_prong != .none) { + const body_len = @truncate(u31, zir.extra[extra_index]); + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + extra_index += body.len; + + try zir.findDeclsBody(list, body); + } + + { + const scalar_cases_len = extra.data.bits.scalar_cases_len; + var scalar_i: usize = 0; + while (scalar_i < scalar_cases_len) : (scalar_i += 1) { + extra_index += 1; + const body_len = @truncate(u31, zir.extra[extra_index]); + extra_index += 1; + const body = zir.extra[extra_index..][0..body_len]; + extra_index += body_len; + + try zir.findDeclsBody(list, body); + } + } + { + var multi_i: usize = 0; + while (multi_i < multi_cases_len) : (multi_i += 1) { + const items_len = zir.extra[extra_index]; + extra_index += 1; + const ranges_len = zir.extra[extra_index]; + extra_index += 1; + const body_len = @truncate(u31, zir.extra[extra_index]); + extra_index += 1; + const items = zir.refSlice(extra_index, items_len); + extra_index += items_len; + _ = items; + + var range_i: usize = 0; + while (range_i < ranges_len) : (range_i += 1) { + extra_index += 1; + extra_index += 1; + } + + const body = zir.extra[extra_index..][0..body_len]; + extra_index += body_len; + + try zir.findDeclsBody(list, body); + } + } +} + +fn findDeclsBody( + zir: Zir, + list: *std.ArrayList(Inst.Index), + body: []const Inst.Index, +) Allocator.Error!void { + for (body) |member| { + try zir.findDeclsInner(list, member); + } +} + +pub const FnInfo = struct { + param_body: []const Inst.Index, + param_body_inst: Inst.Index, + ret_ty_body: []const Inst.Index, + body: []const Inst.Index, + ret_ty_ref: Zir.Inst.Ref, + total_params_len: u32, +}; + +pub fn getParamBody(zir: Zir, fn_inst: Inst.Index) []const u32 { + const tags = zir.instructions.items(.tag); + const datas = zir.instructions.items(.data); + const inst_data = datas[fn_inst].pl_node; + + const param_block_index = switch (tags[fn_inst]) { + .func, .func_inferred 
=> blk: { + const extra = zir.extraData(Inst.Func, inst_data.payload_index); + break :blk extra.data.param_block; + }, + .func_fancy => blk: { + const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index); + break :blk extra.data.param_block; + }, + else => unreachable, + }; + + const param_block = zir.extraData(Inst.Block, datas[param_block_index].pl_node.payload_index); + return zir.extra[param_block.end..][0..param_block.data.body_len]; +} + +pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { + const tags = zir.instructions.items(.tag); + const datas = zir.instructions.items(.data); + const info: struct { + param_block: Inst.Index, + body: []const Inst.Index, + ret_ty_ref: Inst.Ref, + ret_ty_body: []const Inst.Index, + } = switch (tags[fn_inst]) { + .func, .func_inferred => blk: { + const inst_data = datas[fn_inst].pl_node; + const extra = zir.extraData(Inst.Func, inst_data.payload_index); + + var extra_index: usize = extra.end; + var ret_ty_ref: Inst.Ref = .none; + var ret_ty_body: []const Inst.Index = &.{}; + + switch (extra.data.ret_body_len) { + 0 => { + ret_ty_ref = .void_type; + }, + 1 => { + ret_ty_ref = @intToEnum(Inst.Ref, zir.extra[extra_index]); + extra_index += 1; + }, + else => { + ret_ty_body = zir.extra[extra_index..][0..extra.data.ret_body_len]; + extra_index += ret_ty_body.len; + }, + } + + const body = zir.extra[extra_index..][0..extra.data.body_len]; + extra_index += body.len; + + break :blk .{ + .param_block = extra.data.param_block, + .ret_ty_ref = ret_ty_ref, + .ret_ty_body = ret_ty_body, + .body = body, + }; + }, + .func_fancy => blk: { + const inst_data = datas[fn_inst].pl_node; + const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index); + + var extra_index: usize = extra.end; + var ret_ty_ref: Inst.Ref = .void_type; + var ret_ty_body: []const Inst.Index = &.{}; + + extra_index += @boolToInt(extra.data.bits.has_lib_name); + if (extra.data.bits.has_align_body) { + extra_index += zir.extra[extra_index] + 1; + } else if (extra.data.bits.has_align_ref) { + extra_index += 1; + } + if (extra.data.bits.has_addrspace_body) { + extra_index += zir.extra[extra_index] + 1; + } else if (extra.data.bits.has_addrspace_ref) { + extra_index += 1; + } + if (extra.data.bits.has_section_body) { + extra_index += zir.extra[extra_index] + 1; + } else if (extra.data.bits.has_section_ref) { + extra_index += 1; + } + if (extra.data.bits.has_cc_body) { + extra_index += zir.extra[extra_index] + 1; + } else if (extra.data.bits.has_cc_ref) { + extra_index += 1; + } + if (extra.data.bits.has_ret_ty_body) { + const body_len = zir.extra[extra_index]; + extra_index += 1; + ret_ty_body = zir.extra[extra_index..][0..body_len]; + extra_index += ret_ty_body.len; + } else if (extra.data.bits.has_ret_ty_ref) { + ret_ty_ref = @intToEnum(Inst.Ref, zir.extra[extra_index]); + extra_index += 1; + } + + extra_index += @boolToInt(extra.data.bits.has_any_noalias); + + const body = zir.extra[extra_index..][0..extra.data.body_len]; + extra_index += body.len; + break :blk .{ + .param_block = extra.data.param_block, + .ret_ty_ref = ret_ty_ref, + .ret_ty_body = ret_ty_body, + .body = body, + }; + }, + else => unreachable, + }; + assert(tags[info.param_block] == .block or tags[info.param_block] == .block_inline); + const param_block = zir.extraData(Inst.Block, datas[info.param_block].pl_node.payload_index); + const param_body = zir.extra[param_block.end..][0..param_block.data.body_len]; + var total_params_len: u32 = 0; + for (param_body) |inst| { + switch (tags[inst]) { + .param, 
.param_comptime, .param_anytype, .param_anytype_comptime => {
+ total_params_len += 1;
+ },
+ else => continue,
+ }
+ }
+ return .{
+ .param_body = param_body,
+ .param_body_inst = info.param_block,
+ .ret_ty_body = info.ret_ty_body,
+ .ret_ty_ref = info.ret_ty_ref,
+ .body = info.body,
+ .total_params_len = total_params_len,
+ };
+}
+
+const ref_start_index: u32 = @enumToInt(Inst.Ref.ref_start_index);
+
+pub fn indexToRef(inst: Inst.Index) Inst.Ref {
+ return @intToEnum(Inst.Ref, ref_start_index + inst);
+}
+
+pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
+ const ref_int = @enumToInt(inst);
+ if (ref_int >= ref_start_index) {
+ return ref_int - ref_start_index;
+ } else {
+ return null;
+ }
+}

From 4215edbc338bf9dd0135d28532a6254faba650d7 Mon Sep 17 00:00:00 2001
From: Techarix <19954306+Techatrix@users.noreply.github.com>
Date: Tue, 14 Feb 2023 22:02:37 +0100
Subject: [PATCH 2/3] add support for running ast-check without zig

---
 README.md                    |   1 +
 schema.json                  |   5 +
 src/Config.zig               |   3 +
 src/DocumentStore.zig        |  41 ++++
 src/Server.zig               | 314 ++--------------------------
 src/config_gen/config.json   |   6 +
 src/features/diagnostics.zig | 391 +++++++++++++++++++++++++++++++++++
 7 files changed, 467 insertions(+), 294 deletions(-)
 create mode 100644 src/features/diagnostics.zig

diff --git a/README.md b/README.md
index 4de9887..3b9f774 100644
--- a/README.md
+++ b/README.md
@@ -79,6 +79,7 @@ The following options are currently available.
 | `include_at_in_builtins` | `bool` | `false` | Whether the @ sign should be part of the completion of builtins |
 | `skip_std_references` | `bool` | `false` | When true, skips searching for references in std. Improves lookup speed for functions in user's code. Renaming and go-to-definition will continue to work as is |
 | `max_detail_length` | `usize` | `1048576` | The detail field of completions is truncated to be no longer than this (in bytes) |
+| `prefer_ast_check_as_child_process` | `bool` | `true` | Can be used in conjunction with `enable_ast_check_diagnostics` to favor using `zig ast-check` instead of ZLS's fork |
 | `record_session` | `bool` | `false` | When true, zls will record all request is receives and write in into `record_session_path`, so that they can replayed with `zls replay` |
 | `record_session_path` | `?[]const u8` | `null` | Output file path when `record_session` is set. The recommended file extension *.zlsreplay |
 | `replay_session_path` | `?[]const u8` | `null` | Used when calling `zls replay` for specifying the replay file. If no extra argument is given `record_session_path` is used as the default path. |
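For context: the new `prefer_ast_check_as_child_process` option only has an effect when
`enable_ast_check_diagnostics` is enabled. ZLS falls back to building ZIR in-process only
when it cannot, or is configured not to, run `zig ast-check` as a child process; the
decision is made by the `wantZir` helper added to `src/DocumentStore.zig` later in this
patch (excerpted here, reflowed for readability):

    pub fn wantZir(self: DocumentStore) bool {
        if (!self.config.enable_ast_check_diagnostics) return false;
        const can_run_ast_check = std.process.can_spawn and
            self.config.zig_exe_path != null and
            self.config.prefer_ast_check_as_child_process;
        return !can_run_ast_check;
    }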
diff --git a/schema.json b/schema.json
index 894e8ba..59931d2 100644
--- a/schema.json
+++ b/schema.json
@@ -94,6 +94,11 @@
 "type": "integer",
 "default": "1048576"
 },
+ "prefer_ast_check_as_child_process": {
+ "description": "Can be used in conjunction with `enable_ast_check_diagnostics` to favor using `zig ast-check` instead of ZLS's fork",
+ "type": "boolean",
+ "default": "true"
+ },
 "record_session": {
 "description": "When true, zls will record all request is receives and write in into `record_session_path`, so that they can replayed with `zls replay`",
 "type": "boolean",
diff --git a/src/Config.zig b/src/Config.zig
index c198a24..d9fd7cf 100644
--- a/src/Config.zig
+++ b/src/Config.zig
@@ -61,6 +61,9 @@ skip_std_references: bool = false,
 /// The detail field of completions is truncated to be no longer than this (in bytes)
 max_detail_length: usize = 1048576,
+/// Can be used in conjunction with `enable_ast_check_diagnostics` to favor using `zig ast-check` instead of ZLS's fork
+prefer_ast_check_as_child_process: bool = true,
+
 /// When true, zls will record all request is receives and write in into `record_session_path`, so that they can replayed with `zls replay`
 record_session: bool = false,
diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig
index ee90de1..1da039e 100644
--- a/src/DocumentStore.zig
+++ b/src/DocumentStore.zig
@@ -13,6 +13,8 @@ const Config = @import("Config.zig");
 const ZigVersionWrapper = @import("ZigVersionWrapper.zig");
 const translate_c = @import("translate_c.zig");
 const ComptimeInterpreter = @import("ComptimeInterpreter.zig");
+const AstGen = @import("stage2/AstGen.zig");
+const Zir = @import("stage2/Zir.zig");
 const DocumentStore = @This();
@@ -56,6 +58,13 @@ pub const Handle = struct {
 uri: Uri,
 text: [:0]const u8,
 tree: Ast,
+ /// do not access unless `zir_status != .none`
+ zir: Zir = undefined,
+ zir_status: enum {
+ none,
+ outdated,
+ done,
+ } = .none,
 /// Not null if a ComptimeInterpreter is actually used
 interpreter: ?*ComptimeInterpreter = null,
 document_scope: analysis.DocumentScope,
@@ -74,6 +83,7 @@ pub const Handle = struct {
 allocator.destroy(interpreter);
 }
 self.document_scope.deinit(allocator);
+ if (self.zir_status != .none) self.zir.deinit(allocator);
 self.tree.deinit(allocator);
 allocator.free(self.text);
 allocator.free(self.uri);
@@ -214,6 +224,15 @@ pub fn refreshDocument(self: *DocumentStore, uri: Uri, new_text: [:0]const u8) !
handle.tree.deinit(self.allocator); handle.tree = new_tree; + if (self.wantZir() and handle.open and new_tree.errors.len == 0) { + const new_zir = try AstGen.generate(self.allocator, new_tree); + if (handle.zir_status != .none) handle.zir.deinit(self.allocator); + handle.zir = new_zir; + handle.zir_status = .done; + } else if (handle.zir_status == .done) { + handle.zir_status = .outdated; + } + var new_document_scope = try analysis.makeDocumentScope(self.allocator, handle.tree); handle.document_scope.deinit(self.allocator); handle.document_scope = new_document_scope; @@ -696,17 +715,31 @@ fn createDocument(self: *DocumentStore, uri: Uri, text: [:0]const u8, open: bool var tree = try Ast.parse(self.allocator, text, .zig); errdefer tree.deinit(self.allocator); + // remove unused capacity var nodes = tree.nodes.toMultiArrayList(); try nodes.setCapacity(self.allocator, nodes.len); tree.nodes = nodes.slice(); + // remove unused capacity var tokens = tree.tokens.toMultiArrayList(); try tokens.setCapacity(self.allocator, tokens.len); tree.tokens = tokens.slice(); + const generate_zir = self.wantZir() and open and tree.errors.len == 0; + var zir: ?Zir = if (generate_zir) try AstGen.generate(self.allocator, tree) else null; + errdefer if (zir) |*code| code.deinit(self.allocator); + + // remove unused capacity + if (zir) |*code| { + var instructions = code.instructions.toMultiArrayList(); + try instructions.setCapacity(self.allocator, instructions.len); + code.instructions = instructions.slice(); + } + var document_scope = try analysis.makeDocumentScope(self.allocator, tree); errdefer document_scope.deinit(self.allocator); + // remove unused capacity try document_scope.scopes.setCapacity(self.allocator, document_scope.scopes.len); break :blk Handle{ @@ -714,6 +747,8 @@ fn createDocument(self: *DocumentStore, uri: Uri, text: [:0]const u8, open: bool .uri = duped_uri, .text = text, .tree = tree, + .zir = if (zir) |code| code else undefined, + .zir_status = if (zir != null) .done else .none, .document_scope = document_scope, }; }; @@ -1082,6 +1117,12 @@ pub fn enumCompletionItems(self: DocumentStore, arena: std.mem.Allocator, handle return try self.tagStoreCompletionItems(arena, handle, "enum_completions"); } +pub fn wantZir(self: DocumentStore) bool { + if (!self.config.enable_ast_check_diagnostics) return false; + const can_run_ast_check = std.process.can_spawn and self.config.zig_exe_path != null and self.config.prefer_ast_check_as_child_process; + return !can_run_ast_check; +} + pub fn ensureInterpreterExists(self: *DocumentStore, uri: Uri) !*ComptimeInterpreter { var handle = self.handles.get(uri).?; if (handle.interpreter != null) return handle.interpreter.?; diff --git a/src/Server.zig b/src/Server.zig index 17fb4b2..e2f31dd 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -29,6 +29,7 @@ const completions = @import("features/completions.zig"); const goto = @import("features/goto.zig"); const hover_handler = @import("features/hover.zig"); const selection_range = @import("features/selection_range.zig"); +const diagnostics_gen = @import("features/diagnostics.zig"); const tres = @import("tres"); @@ -209,285 +210,6 @@ fn showMessage( }); } -fn generateDiagnostics(server: *Server, handle: DocumentStore.Handle) error{OutOfMemory}!types.PublishDiagnosticsParams { - const tracy_zone = tracy.trace(@src()); - defer tracy_zone.end(); - - std.debug.assert(server.client_capabilities.supports_publish_diagnostics); - - const tree = handle.tree; - - var allocator = server.arena.allocator(); - var diagnostics 
= std.ArrayListUnmanaged(types.Diagnostic){}; - - for (tree.errors) |err| { - var mem_buffer: [256]u8 = undefined; - var fbs = std.io.fixedBufferStream(&mem_buffer); - tree.renderError(err, fbs.writer()) catch if (std.debug.runtime_safety) unreachable else continue; // if an error occurs here increase buffer size - - try diagnostics.append(allocator, .{ - .range = offsets.tokenToRange(tree, err.token, server.offset_encoding), - .severity = .Error, - .code = .{ .string = @tagName(err.tag) }, - .source = "zls", - .message = try server.arena.allocator().dupe(u8, fbs.getWritten()), - // .relatedInformation = undefined - }); - } - - if (server.config.enable_ast_check_diagnostics and tree.errors.len == 0) { - getAstCheckDiagnostics(server, handle, &diagnostics) catch |err| { - log.err("failed to run ast-check: {}", .{err}); - }; - } - - if (server.config.warn_style) { - var node: u32 = 0; - while (node < tree.nodes.len) : (node += 1) { - if (ast.isBuiltinCall(tree, node)) { - const builtin_token = tree.nodes.items(.main_token)[node]; - const call_name = tree.tokenSlice(builtin_token); - - if (!std.mem.eql(u8, call_name, "@import")) continue; - - var buffer: [2]Ast.Node.Index = undefined; - const params = ast.builtinCallParams(tree, node, &buffer).?; - - if (params.len != 1) continue; - - const import_str_token = tree.nodes.items(.main_token)[params[0]]; - const import_str = tree.tokenSlice(import_str_token); - - if (std.mem.startsWith(u8, import_str, "\"./")) { - try diagnostics.append(allocator, .{ - .range = offsets.tokenToRange(tree, import_str_token, server.offset_encoding), - .severity = .Hint, - .code = .{ .string = "dot_slash_import" }, - .source = "zls", - .message = "A ./ is not needed in imports", - }); - } - } - } - - // TODO: style warnings for types, values and declarations below root scope - if (tree.errors.len == 0) { - for (tree.rootDecls()) |decl_idx| { - const decl = tree.nodes.items(.tag)[decl_idx]; - switch (decl) { - .fn_proto, - .fn_proto_multi, - .fn_proto_one, - .fn_proto_simple, - .fn_decl, - => blk: { - var buf: [1]Ast.Node.Index = undefined; - const func = tree.fullFnProto(&buf, decl_idx).?; - if (func.extern_export_inline_token != null) break :blk; - - if (func.name_token) |name_token| { - const is_type_function = Analyser.isTypeFunction(tree, func); - - const func_name = tree.tokenSlice(name_token); - if (!is_type_function and !Analyser.isCamelCase(func_name)) { - try diagnostics.append(allocator, .{ - .range = offsets.tokenToRange(tree, name_token, server.offset_encoding), - .severity = .Hint, - .code = .{ .string = "bad_style" }, - .source = "zls", - .message = "Functions should be camelCase", - }); - } else if (is_type_function and !Analyser.isPascalCase(func_name)) { - try diagnostics.append(allocator, .{ - .range = offsets.tokenToRange(tree, name_token, server.offset_encoding), - .severity = .Hint, - .code = .{ .string = "bad_style" }, - .source = "zls", - .message = "Type functions should be PascalCase", - }); - } - } - }, - else => {}, - } - } - } - } - - for (handle.cimports.items(.hash), handle.cimports.items(.node)) |hash, node| { - const result = server.document_store.cimports.get(hash) orelse continue; - if (result != .failure) continue; - const stderr = std.mem.trim(u8, result.failure, " "); - - var pos_and_diag_iterator = std.mem.split(u8, stderr, ":"); - _ = pos_and_diag_iterator.next(); // skip file path - _ = pos_and_diag_iterator.next(); // skip line - _ = pos_and_diag_iterator.next(); // skip character - - try diagnostics.append(allocator, .{ - 
.range = offsets.nodeToRange(handle.tree, node, server.offset_encoding), - .severity = .Error, - .code = .{ .string = "cImport" }, - .source = "zls", - .message = try allocator.dupe(u8, pos_and_diag_iterator.rest()), - }); - } - - if (server.config.highlight_global_var_declarations) { - const main_tokens = tree.nodes.items(.main_token); - const tags = tree.tokens.items(.tag); - for (tree.rootDecls()) |decl| { - const decl_tag = tree.nodes.items(.tag)[decl]; - const decl_main_token = tree.nodes.items(.main_token)[decl]; - - switch (decl_tag) { - .simple_var_decl, - .aligned_var_decl, - .local_var_decl, - .global_var_decl, - => { - if (tags[main_tokens[decl]] != .keyword_var) continue; // skip anything immutable - // uncomment this to get a list :) - //log.debug("possible global variable \"{s}\"", .{tree.tokenSlice(decl_main_token + 1)}); - try diagnostics.append(allocator, .{ - .range = offsets.tokenToRange(tree, decl_main_token, server.offset_encoding), - .severity = .Hint, - .code = .{ .string = "highlight_global_var_declarations" }, - .source = "zls", - .message = "Global var declaration", - }); - }, - else => {}, - } - } - } - - if (handle.interpreter) |int| { - try diagnostics.ensureUnusedCapacity(allocator, int.errors.count()); - - var err_it = int.errors.iterator(); - - while (err_it.next()) |err| { - diagnostics.appendAssumeCapacity(.{ - .range = offsets.nodeToRange(tree, err.key_ptr.*, server.offset_encoding), - .severity = .Error, - .code = .{ .string = err.value_ptr.code }, - .source = "zls", - .message = err.value_ptr.message, - }); - } - } - // try diagnostics.appendSlice(allocator, handle.interpreter.?.diagnostics.items); - - return .{ - .uri = handle.uri, - .diagnostics = diagnostics.items, - }; -} - -fn getAstCheckDiagnostics( - server: *Server, - handle: DocumentStore.Handle, - diagnostics: *std.ArrayListUnmanaged(types.Diagnostic), -) !void { - var allocator = server.arena.allocator(); - - const zig_exe_path = server.config.zig_exe_path orelse return; - - var process = std.ChildProcess.init(&[_][]const u8{ zig_exe_path, "ast-check", "--color", "off" }, server.allocator); - process.stdin_behavior = .Pipe; - process.stderr_behavior = .Pipe; - - process.spawn() catch |err| { - log.warn("Failed to spawn zig ast-check process, error: {}", .{err}); - return; - }; - try process.stdin.?.writeAll(handle.text); - process.stdin.?.close(); - - process.stdin = null; - - const stderr_bytes = try process.stderr.?.reader().readAllAlloc(server.allocator, std.math.maxInt(usize)); - defer server.allocator.free(stderr_bytes); - - const term = process.wait() catch |err| { - log.warn("Failed to await zig ast-check process, error: {}", .{err}); - return; - }; - - if (term != .Exited) return; - - var last_diagnostic: ?types.Diagnostic = null; - // we don't store DiagnosticRelatedInformation in last_diagnostic instead - // its stored in last_related_diagnostics because we need an ArrayList - var last_related_diagnostics: std.ArrayListUnmanaged(types.DiagnosticRelatedInformation) = .{}; - - // NOTE: I believe that with color off it's one diag per line; is this correct? 
- var line_iterator = std.mem.split(u8, stderr_bytes, "\n"); - - while (line_iterator.next()) |line| lin: { - if (!std.mem.startsWith(u8, line, "")) continue; - - var pos_and_diag_iterator = std.mem.split(u8, line, ":"); - const maybe_first = pos_and_diag_iterator.next(); - if (maybe_first) |first| { - if (first.len <= 1) break :lin; - } else break; - - const utf8_position = types.Position{ - .line = (try std.fmt.parseInt(u32, pos_and_diag_iterator.next().?, 10)) - 1, - .character = (try std.fmt.parseInt(u32, pos_and_diag_iterator.next().?, 10)) - 1, - }; - - // zig uses utf-8 encoding for character offsets - const position = offsets.convertPositionEncoding(handle.text, utf8_position, .@"utf-8", server.offset_encoding); - const range = offsets.tokenPositionToRange(handle.text, position, server.offset_encoding); - - const msg = pos_and_diag_iterator.rest()[1..]; - - if (std.mem.startsWith(u8, msg, "note: ")) { - try last_related_diagnostics.append(allocator, .{ - .location = .{ - .uri = handle.uri, - .range = range, - }, - .message = try server.arena.allocator().dupe(u8, msg["note: ".len..]), - }); - continue; - } - - if (last_diagnostic) |*diagnostic| { - diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(allocator); - try diagnostics.append(allocator, diagnostic.*); - last_diagnostic = null; - } - - if (std.mem.startsWith(u8, msg, "error: ")) { - last_diagnostic = types.Diagnostic{ - .range = range, - .severity = .Error, - .code = .{ .string = "ast_check" }, - .source = "zls", - .message = try server.arena.allocator().dupe(u8, msg["error: ".len..]), - }; - } else { - last_diagnostic = types.Diagnostic{ - .range = range, - .severity = .Error, - .code = .{ .string = "ast_check" }, - .source = "zls", - .message = try server.arena.allocator().dupe(u8, msg), - }; - } - } - - if (last_diagnostic) |*diagnostic| { - diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(allocator); - try diagnostics.append(allocator, diagnostic.*); - last_diagnostic = null; - } -} - fn getAutofixMode(server: *Server) enum { on_save, will_save_wait_until, @@ -507,12 +229,11 @@ fn getAutofixMode(server: *Server) enum { /// caller owns returned memory. 
pub fn autofix(server: *Server, allocator: std.mem.Allocator, handle: *const DocumentStore.Handle) error{OutOfMemory}!std.ArrayListUnmanaged(types.TextEdit) { if (!server.config.enable_ast_check_diagnostics) return .{}; - if (handle.tree.errors.len != 0) return .{}; + var diagnostics = std.ArrayListUnmanaged(types.Diagnostic){}; - getAstCheckDiagnostics(server, handle.*, &diagnostics) catch |err| { - log.err("failed to run ast-check: {}", .{err}); - }; + try diagnostics_gen.getAstCheckDiagnostics(server, handle.*, &diagnostics); + if (diagnostics.items.len == 0) return .{}; var builder = code_actions.Builder{ .arena = server.arena.allocator(), @@ -765,6 +486,16 @@ fn initializeHandler(server: *Server, request: types.InitializeParams) Error!typ , .{server.config.record_session_path}); } + if (server.config.enable_ast_check_diagnostics and + server.config.prefer_ast_check_as_child_process) + { + if (!std.process.can_spawn) { + log.info("'prefer_ast_check_as_child_process' is ignored because your OS can't spawn a child process", .{}); + } else if (server.config.zig_exe_path == null) { + log.info("'prefer_ast_check_as_child_process' is ignored because Zig could not be found", .{}); + } + } + return .{ .serverInfo = .{ .name = "zls", @@ -1025,7 +756,7 @@ fn openDocumentHandler(server: *Server, notification: types.DidOpenTextDocumentP if (server.client_capabilities.supports_publish_diagnostics) blk: { if (!std.process.can_spawn) break :blk; - const diagnostics = try server.generateDiagnostics(handle); + const diagnostics = try diagnostics_gen.generateDiagnostics(server, handle); server.sendNotification("textDocument/publishDiagnostics", diagnostics); } } @@ -1042,7 +773,7 @@ fn changeDocumentHandler(server: *Server, notification: types.DidChangeTextDocum if (server.client_capabilities.supports_publish_diagnostics) blk: { if (!std.process.can_spawn) break :blk; - const diagnostics = try server.generateDiagnostics(handle.*); + const diagnostics = try diagnostics_gen.generateDiagnostics(server, handle.*); server.sendNotification("textDocument/publishDiagnostics", diagnostics); } } @@ -1054,7 +785,7 @@ fn saveDocumentHandler(server: *Server, notification: types.DidSaveTextDocumentP const handle = server.document_store.getHandle(uri) orelse return; try server.document_store.applySave(handle); - if (std.process.can_spawn and server.getAutofixMode() == .on_save) { + if (server.getAutofixMode() == .on_save) { var text_edits = try server.autofix(allocator, handle); var workspace_edit = types.WorkspaceEdit{ .changes = .{} }; @@ -1082,7 +813,6 @@ fn willSaveWaitUntilHandler(server: *Server, request: types.WillSaveTextDocument const handle = server.document_store.getHandle(request.textDocument.uri) orelse return null; - if (!std.process.can_spawn) return null; var text_edits = try server.autofix(allocator, handle); return try text_edits.toOwnedSlice(allocator); @@ -1185,7 +915,7 @@ pub fn hoverHandler(server: *Server, request: types.HoverParams) Error!?types.Ho // TODO: Figure out a better solution for comptime interpreter diags if (server.client_capabilities.supports_publish_diagnostics) blk: { if (!std.process.can_spawn) break :blk; - const diagnostics = try server.generateDiagnostics(handle.*); + const diagnostics = try diagnostics_gen.generateDiagnostics(server, handle.*); server.sendNotification("textDocument/publishDiagnostics", diagnostics); } @@ -1459,12 +1189,8 @@ fn codeActionHandler(server: *Server, request: types.CodeActionParams) Error!?[] // as of right now, only ast-check errors may get a 
code action var diagnostics = std.ArrayListUnmanaged(types.Diagnostic){}; - if (server.config.enable_ast_check_diagnostics and handle.tree.errors.len == 0) blk: { - if (!std.process.can_spawn) break :blk; - getAstCheckDiagnostics(server, handle.*, &diagnostics) catch |err| { - log.err("failed to run ast-check: {}", .{err}); - return error.InternalError; - }; + if (server.config.enable_ast_check_diagnostics and handle.tree.errors.len == 0) { + try diagnostics_gen.getAstCheckDiagnostics(server, handle.*, &diagnostics); } var actions = std.ArrayListUnmanaged(types.CodeAction){}; diff --git a/src/config_gen/config.json b/src/config_gen/config.json index 3ac4b16..2b4821d 100644 --- a/src/config_gen/config.json +++ b/src/config_gen/config.json @@ -107,6 +107,12 @@ "type": "usize", "default": "1048576" }, + { + "name": "prefer_ast_check_as_child_process", + "description": "Can be used in conjunction with `enable_ast_check_diagnostics` to favor using `zig ast-check` instead of ZLS's fork", + "type": "bool", + "default": "true" + }, { "name": "record_session", "description": "When true, zls will record all request is receives and write in into `record_session_path`, so that they can replayed with `zls replay`", diff --git a/src/features/diagnostics.zig b/src/features/diagnostics.zig new file mode 100644 index 0000000..6603cd9 --- /dev/null +++ b/src/features/diagnostics.zig @@ -0,0 +1,391 @@ +const std = @import("std"); +const Ast = std.zig.Ast; +const log = std.log.scoped(.zls_diagnostics); + +const Server = @import("../Server.zig"); +const DocumentStore = @import("../DocumentStore.zig"); +const types = @import("../lsp.zig"); +const Analyser = @import("../analysis.zig"); +const ast = @import("../ast.zig"); +const offsets = @import("../offsets.zig"); +const tracy = @import("../tracy.zig"); + +const Module = @import("../stage2/Module.zig"); +const Zir = @import("../stage2/Zir.zig"); + +pub fn generateDiagnostics(server: *Server, handle: DocumentStore.Handle) error{OutOfMemory}!types.PublishDiagnosticsParams { + const tracy_zone = tracy.trace(@src()); + defer tracy_zone.end(); + + std.debug.assert(server.client_capabilities.supports_publish_diagnostics); + + const tree = handle.tree; + + var allocator = server.arena.allocator(); + var diagnostics = std.ArrayListUnmanaged(types.Diagnostic){}; + + for (tree.errors) |err| { + var mem_buffer: [256]u8 = undefined; + var fbs = std.io.fixedBufferStream(&mem_buffer); + tree.renderError(err, fbs.writer()) catch if (std.debug.runtime_safety) unreachable else continue; // if an error occurs here increase buffer size + + try diagnostics.append(allocator, .{ + .range = offsets.tokenToRange(tree, err.token, server.offset_encoding), + .severity = .Error, + .code = .{ .string = @tagName(err.tag) }, + .source = "zls", + .message = try server.arena.allocator().dupe(u8, fbs.getWritten()), + // .relatedInformation = undefined + }); + } + + if (server.config.enable_ast_check_diagnostics and tree.errors.len == 0) { + try getAstCheckDiagnostics(server, handle, &diagnostics); + } + + if (server.config.warn_style) { + var node: u32 = 0; + while (node < tree.nodes.len) : (node += 1) { + if (ast.isBuiltinCall(tree, node)) { + const builtin_token = tree.nodes.items(.main_token)[node]; + const call_name = tree.tokenSlice(builtin_token); + + if (!std.mem.eql(u8, call_name, "@import")) continue; + + var buffer: [2]Ast.Node.Index = undefined; + const params = ast.builtinCallParams(tree, node, &buffer).?; + + if (params.len != 1) continue; + + const import_str_token =
tree.nodes.items(.main_token)[params[0]]; + const import_str = tree.tokenSlice(import_str_token); + + if (std.mem.startsWith(u8, import_str, "\"./")) { + try diagnostics.append(allocator, .{ + .range = offsets.tokenToRange(tree, import_str_token, server.offset_encoding), + .severity = .Hint, + .code = .{ .string = "dot_slash_import" }, + .source = "zls", + .message = "A ./ is not needed in imports", + }); + } + } + } + + // TODO: style warnings for types, values and declarations below root scope + if (tree.errors.len == 0) { + for (tree.rootDecls()) |decl_idx| { + const decl = tree.nodes.items(.tag)[decl_idx]; + switch (decl) { + .fn_proto, + .fn_proto_multi, + .fn_proto_one, + .fn_proto_simple, + .fn_decl, + => blk: { + var buf: [1]Ast.Node.Index = undefined; + const func = tree.fullFnProto(&buf, decl_idx).?; + if (func.extern_export_inline_token != null) break :blk; + + if (func.name_token) |name_token| { + const is_type_function = Analyser.isTypeFunction(tree, func); + + const func_name = tree.tokenSlice(name_token); + if (!is_type_function and !Analyser.isCamelCase(func_name)) { + try diagnostics.append(allocator, .{ + .range = offsets.tokenToRange(tree, name_token, server.offset_encoding), + .severity = .Hint, + .code = .{ .string = "bad_style" }, + .source = "zls", + .message = "Functions should be camelCase", + }); + } else if (is_type_function and !Analyser.isPascalCase(func_name)) { + try diagnostics.append(allocator, .{ + .range = offsets.tokenToRange(tree, name_token, server.offset_encoding), + .severity = .Hint, + .code = .{ .string = "bad_style" }, + .source = "zls", + .message = "Type functions should be PascalCase", + }); + } + } + }, + else => {}, + } + } + } + } + + for (handle.cimports.items(.hash), handle.cimports.items(.node)) |hash, node| { + const result = server.document_store.cimports.get(hash) orelse continue; + if (result != .failure) continue; + const stderr = std.mem.trim(u8, result.failure, " "); + + var pos_and_diag_iterator = std.mem.split(u8, stderr, ":"); + _ = pos_and_diag_iterator.next(); // skip file path + _ = pos_and_diag_iterator.next(); // skip line + _ = pos_and_diag_iterator.next(); // skip character + + try diagnostics.append(allocator, .{ + .range = offsets.nodeToRange(handle.tree, node, server.offset_encoding), + .severity = .Error, + .code = .{ .string = "cImport" }, + .source = "zls", + .message = try allocator.dupe(u8, pos_and_diag_iterator.rest()), + }); + } + + if (server.config.highlight_global_var_declarations) { + const main_tokens = tree.nodes.items(.main_token); + const tags = tree.tokens.items(.tag); + for (tree.rootDecls()) |decl| { + const decl_tag = tree.nodes.items(.tag)[decl]; + const decl_main_token = tree.nodes.items(.main_token)[decl]; + + switch (decl_tag) { + .simple_var_decl, + .aligned_var_decl, + .local_var_decl, + .global_var_decl, + => { + if (tags[main_tokens[decl]] != .keyword_var) continue; // skip anything immutable + // uncomment this to get a list :) + //log.debug("possible global variable \"{s}\"", .{tree.tokenSlice(decl_main_token + 1)}); + try diagnostics.append(allocator, .{ + .range = offsets.tokenToRange(tree, decl_main_token, server.offset_encoding), + .severity = .Hint, + .code = .{ .string = "highlight_global_var_declarations" }, + .source = "zls", + .message = "Global var declaration", + }); + }, + else => {}, + } + } + } + + if (handle.interpreter) |int| { + try diagnostics.ensureUnusedCapacity(allocator, int.errors.count()); + + var err_it = int.errors.iterator(); + + while (err_it.next()) |err| { + 
diagnostics.appendAssumeCapacity(.{ + .range = offsets.nodeToRange(tree, err.key_ptr.*, server.offset_encoding), + .severity = .Error, + .code = .{ .string = err.value_ptr.code }, + .source = "zls", + .message = err.value_ptr.message, + }); + } + } + // try diagnostics.appendSlice(allocator, handle.interpreter.?.diagnostics.items); + + return .{ + .uri = handle.uri, + .diagnostics = diagnostics.items, + }; +} + +pub fn getAstCheckDiagnostics( + server: *Server, + handle: DocumentStore.Handle, + diagnostics: *std.ArrayListUnmanaged(types.Diagnostic), +) error{OutOfMemory}!void { + std.debug.assert(server.config.enable_ast_check_diagnostics); + std.debug.assert(handle.tree.errors.len == 0); + + if (server.config.prefer_ast_check_as_child_process and + std.process.can_spawn and + server.config.zig_exe_path != null) + { + getDiagnosticsFromAstCheck(server, handle, diagnostics) catch |err| { + log.err("failed to run ast-check: {}", .{err}); + }; + } else { + std.debug.assert(server.document_store.wantZir()); + switch (handle.zir_status) { + .none, .outdated => {}, + .done => try getDiagnosticsFromZir(server, handle, diagnostics), + } + } +} + +fn getDiagnosticsFromAstCheck( + server: *Server, + handle: DocumentStore.Handle, + diagnostics: *std.ArrayListUnmanaged(types.Diagnostic), +) !void { + comptime std.debug.assert(std.process.can_spawn); + std.debug.assert(server.config.zig_exe_path != null); + var allocator = server.arena.allocator(); + + const zig_exe_path = server.config.zig_exe_path.?; + + var process = std.ChildProcess.init(&[_][]const u8{ zig_exe_path, "ast-check", "--color", "off" }, server.allocator); + process.stdin_behavior = .Pipe; + process.stderr_behavior = .Pipe; + + process.spawn() catch |err| { + log.warn("Failed to spawn zig ast-check process, error: {}", .{err}); + return; + }; + try process.stdin.?.writeAll(handle.text); + process.stdin.?.close(); + + process.stdin = null; + + const stderr_bytes = try process.stderr.?.reader().readAllAlloc(server.allocator, std.math.maxInt(usize)); + defer server.allocator.free(stderr_bytes); + + const term = process.wait() catch |err| { + log.warn("Failed to await zig ast-check process, error: {}", .{err}); + return; + }; + + if (term != .Exited) return; + + var last_diagnostic: ?types.Diagnostic = null; + // we don't store DiagnosticRelatedInformation in last_diagnostic instead + // its stored in last_related_diagnostics because we need an ArrayList + var last_related_diagnostics: std.ArrayListUnmanaged(types.DiagnosticRelatedInformation) = .{}; + + // NOTE: I believe that with color off it's one diag per line; is this correct? 
+ var line_iterator = std.mem.split(u8, stderr_bytes, "\n"); + + while (line_iterator.next()) |line| lin: { + if (!std.mem.startsWith(u8, line, "")) continue; + + var pos_and_diag_iterator = std.mem.split(u8, line, ":"); + const maybe_first = pos_and_diag_iterator.next(); + if (maybe_first) |first| { + if (first.len <= 1) break :lin; + } else break; + + const utf8_position = types.Position{ + .line = (try std.fmt.parseInt(u32, pos_and_diag_iterator.next().?, 10)) - 1, + .character = (try std.fmt.parseInt(u32, pos_and_diag_iterator.next().?, 10)) - 1, + }; + + // zig uses utf-8 encoding for character offsets + const position = offsets.convertPositionEncoding(handle.text, utf8_position, .@"utf-8", server.offset_encoding); + const range = offsets.tokenPositionToRange(handle.text, position, server.offset_encoding); + + const msg = pos_and_diag_iterator.rest()[1..]; + + if (std.mem.startsWith(u8, msg, "note: ")) { + try last_related_diagnostics.append(allocator, .{ + .location = .{ + .uri = handle.uri, + .range = range, + }, + .message = try server.arena.allocator().dupe(u8, msg["note: ".len..]), + }); + continue; + } + + if (last_diagnostic) |*diagnostic| { + diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(allocator); + try diagnostics.append(allocator, diagnostic.*); + last_diagnostic = null; + } + + if (std.mem.startsWith(u8, msg, "error: ")) { + last_diagnostic = types.Diagnostic{ + .range = range, + .severity = .Error, + .code = .{ .string = "ast_check" }, + .source = "zls", + .message = try server.arena.allocator().dupe(u8, msg["error: ".len..]), + }; + } else { + last_diagnostic = types.Diagnostic{ + .range = range, + .severity = .Error, + .code = .{ .string = "ast_check" }, + .source = "zls", + .message = try server.arena.allocator().dupe(u8, msg), + }; + } + } + + if (last_diagnostic) |*diagnostic| { + diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(allocator); + try diagnostics.append(allocator, diagnostic.*); + last_diagnostic = null; + } +} + +fn getDiagnosticsFromZir( + server: *Server, + handle: DocumentStore.Handle, + diagnostics: *std.ArrayListUnmanaged(types.Diagnostic), +) error{OutOfMemory}!void { + std.debug.assert(handle.zir_status != .none); + + const allocator = server.arena.allocator(); + + const payload_index = handle.zir.extra[@enumToInt(Zir.ExtraIndex.compile_errors)]; + if (payload_index == 0) return; + + const header = handle.zir.extraData(Zir.Inst.CompileErrors, payload_index); + const items_len = header.data.items_len; + + try diagnostics.ensureUnusedCapacity(allocator, items_len); + + var extra_index = header.end; + for (0..items_len) |_| { + const item = handle.zir.extraData(Zir.Inst.CompileErrors.Item, extra_index); + extra_index = item.end; + const err_loc = blk: { + if (item.data.node != 0) { + break :blk offsets.nodeToLoc(handle.tree, item.data.node); + } + const loc = offsets.tokenToLoc(handle.tree, item.data.token); + break :blk offsets.Loc{ + .start = loc.start + item.data.byte_offset, + .end = loc.end, + }; + }; + + var notes: []types.DiagnosticRelatedInformation = &.{}; + if (item.data.notes != 0) { + const block = handle.zir.extraData(Zir.Inst.Block, item.data.notes); + const body = handle.zir.extra[block.end..][0..block.data.body_len]; + notes = try allocator.alloc(types.DiagnosticRelatedInformation, body.len); + for (notes, body) |*note, note_index| { + const note_item = handle.zir.extraData(Zir.Inst.CompileErrors.Item, note_index); + const msg = handle.zir.nullTerminatedString(note_item.data.msg); 
+ + const loc = blk: { + if (note_item.data.node != 0) { + break :blk offsets.nodeToLoc(handle.tree, note_item.data.node); + } + const loc = offsets.tokenToLoc(handle.tree, note_item.data.token); + break :blk offsets.Loc{ + .start = loc.start + note_item.data.byte_offset, + .end = loc.end, + }; + }; + + note.* = .{ + .location = .{ + .uri = handle.uri, + .range = offsets.locToRange(handle.text, loc, server.offset_encoding), + }, + .message = msg, + }; + } + } + + const msg = handle.zir.nullTerminatedString(item.data.msg); + diagnostics.appendAssumeCapacity(.{ + .range = offsets.locToRange(handle.text, err_loc, server.offset_encoding), + .severity = .Error, + .code = .{ .string = "ast_check" }, + .source = "zls", + .message = msg, + .relatedInformation = if (notes.len != 0) notes else null, + }); + } +} From 018569888fd08d59acc6fb1aa6eea4cf71bfdbc9 Mon Sep 17 00:00:00 2001 From: Techarix <19954306+Techatrix@users.noreply.github.com> Date: Tue, 14 Feb 2023 22:53:05 +0100 Subject: [PATCH 3/3] explain how AstGen will diverge from zig codebase --- src/stage2/AstGen.zig | 7 +++++++ src/stage2/Zir.zig | 11 ++++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/stage2/AstGen.zig b/src/stage2/AstGen.zig index e6e2d70..a71fdc1 100644 --- a/src/stage2/AstGen.zig +++ b/src/stage2/AstGen.zig @@ -1,4 +1,11 @@ //! Ingests an AST and produces ZIR code. +//! +//! TODO +//! In contrast to the upstream zig codebase, generating Zir +//! will not fail after encountering compile errors but will +//! instead try to ignore or patch these errors. +//! If this is not possible, an `unknown` instruction will be emitted. + const AstGen = @This(); const std = @import("std"); diff --git a/src/stage2/Zir.zig b/src/stage2/Zir.zig index e3cf276..19009f2 100644 --- a/src/stage2/Zir.zig +++ b/src/stage2/Zir.zig @@ -2065,10 +2065,6 @@ pub const Inst = struct { /// The tag type is specified so that it is safe to bitcast between `[]u32` /// and `[]Ref`. pub const Ref = enum(u32) { - /// This Ref does not correspond to any ZIR instruction or constant - /// value and may instead be used as a sentinel to indicate null. - none, - u1_type, u8_type, i8_type, @@ -2130,6 +2126,7 @@ pub const Inst = struct { const_slice_u8_type, anyerror_void_error_union_type, generic_poison_type, + unknown_type, /// `undefined` (untyped) undef, @@ -2160,9 +2157,13 @@ pub const Inst = struct { /// Used for generic parameters where the type and value /// is not known until generic function instantiation. generic_poison, + unknown, ref_start_index, + /// This Ref does not correspond to any ZIR instruction or constant + /// value and may instead be used as a sentinel to indicate null. + none = std.math.maxInt(u32), _, }; @@ -3839,7 +3840,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { }; } -const ref_start_index: u32 = @enumToInt(Inst.Ref.ref_start_index); +pub const ref_start_index: u32 = @enumToInt(Inst.Ref.ref_start_index); pub fn indexToRef(inst: Inst.Index) Inst.Ref { return @intToEnum(Inst.Ref, ref_start_index + inst);